Diffstat (limited to 'src/backend/optimizer')
-rw-r--r--  src/backend/optimizer/Makefile  13
-rw-r--r--  src/backend/optimizer/README  1160
-rw-r--r--  src/backend/optimizer/geqo/Makefile  33
-rw-r--r--  src/backend/optimizer/geqo/geqo_copy.c  54
-rw-r--r--  src/backend/optimizer/geqo/geqo_cx.c  123
-rw-r--r--  src/backend/optimizer/geqo/geqo_erx.c  468
-rw-r--r--  src/backend/optimizer/geqo/geqo_eval.c  338
-rw-r--r--  src/backend/optimizer/geqo/geqo_main.c  353
-rw-r--r--  src/backend/optimizer/geqo/geqo_misc.c  132
-rw-r--r--  src/backend/optimizer/geqo/geqo_mutation.c  66
-rw-r--r--  src/backend/optimizer/geqo/geqo_ox1.c  94
-rw-r--r--  src/backend/optimizer/geqo/geqo_ox2.c  111
-rw-r--r--  src/backend/optimizer/geqo/geqo_pmx.c  218
-rw-r--r--  src/backend/optimizer/geqo/geqo_pool.c  265
-rw-r--r--  src/backend/optimizer/geqo/geqo_px.c  107
-rw-r--r--  src/backend/optimizer/geqo/geqo_random.c  45
-rw-r--r--  src/backend/optimizer/geqo/geqo_recombination.c  92
-rw-r--r--  src/backend/optimizer/geqo/geqo_selection.c  111
-rw-r--r--  src/backend/optimizer/path/Makefile  26
-rw-r--r--  src/backend/optimizer/path/allpaths.c  4668
-rw-r--r--  src/backend/optimizer/path/clausesel.c  1000
-rw-r--r--  src/backend/optimizer/path/costsize.c  6221
-rw-r--r--  src/backend/optimizer/path/equivclass.c  3226
-rw-r--r--  src/backend/optimizer/path/indxpath.c  3817
-rw-r--r--  src/backend/optimizer/path/joinpath.c  2367
-rw-r--r--  src/backend/optimizer/path/joinrels.c  1783
-rw-r--r--  src/backend/optimizer/path/pathkeys.c  1917
-rw-r--r--  src/backend/optimizer/path/tidpath.c  528
-rw-r--r--  src/backend/optimizer/plan/Makefile  25
-rw-r--r--  src/backend/optimizer/plan/README  158
-rw-r--r--  src/backend/optimizer/plan/analyzejoins.c  1127
-rw-r--r--  src/backend/optimizer/plan/createplan.c  7244
-rw-r--r--  src/backend/optimizer/plan/initsplan.c  2752
-rw-r--r--  src/backend/optimizer/plan/planagg.c  513
-rw-r--r--  src/backend/optimizer/plan/planmain.c  284
-rw-r--r--  src/backend/optimizer/plan/planner.c  7492
-rw-r--r--  src/backend/optimizer/plan/setrefs.c  3398
-rw-r--r--  src/backend/optimizer/plan/subselect.c  2999
-rw-r--r--  src/backend/optimizer/prep/Makefile  22
-rw-r--r--  src/backend/optimizer/prep/prepagg.c  674
-rw-r--r--  src/backend/optimizer/prep/prepjointree.c  3746
-rw-r--r--  src/backend/optimizer/prep/prepqual.c  677
-rw-r--r--  src/backend/optimizer/prep/preptlist.c  497
-rw-r--r--  src/backend/optimizer/prep/prepunion.c  1420
-rw-r--r--  src/backend/optimizer/util/Makefile  31
-rw-r--r--  src/backend/optimizer/util/appendinfo.c  1011
-rw-r--r--  src/backend/optimizer/util/clauses.c  5255
-rw-r--r--  src/backend/optimizer/util/inherit.c  949
-rw-r--r--  src/backend/optimizer/util/joininfo.c  140
-rw-r--r--  src/backend/optimizer/util/orclauses.c  360
-rw-r--r--  src/backend/optimizer/util/paramassign.c  591
-rw-r--r--  src/backend/optimizer/util/pathnode.c  4298
-rw-r--r--  src/backend/optimizer/util/placeholder.c  477
-rw-r--r--  src/backend/optimizer/util/plancat.c  2509
-rw-r--r--  src/backend/optimizer/util/predtest.c  2224
-rw-r--r--  src/backend/optimizer/util/relnode.c  2047
-rw-r--r--  src/backend/optimizer/util/restrictinfo.c  655
-rw-r--r--  src/backend/optimizer/util/tlist.c  1258
-rw-r--r--  src/backend/optimizer/util/var.c  903
59 files changed, 85072 insertions, 0 deletions
diff --git a/src/backend/optimizer/Makefile b/src/backend/optimizer/Makefile
new file mode 100644
index 0000000..f523e5e
--- /dev/null
+++ b/src/backend/optimizer/Makefile
@@ -0,0 +1,13 @@
+#
+# Makefile for optimizer
+#
+# src/backend/optimizer/Makefile
+#
+
+subdir = src/backend/optimizer
+top_builddir = ../../..
+include $(top_builddir)/src/Makefile.global
+
+SUBDIRS = geqo path plan prep util
+
+include $(top_srcdir)/src/backend/common.mk
diff --git a/src/backend/optimizer/README b/src/backend/optimizer/README
new file mode 100644
index 0000000..41c120e
--- /dev/null
+++ b/src/backend/optimizer/README
@@ -0,0 +1,1160 @@
+src/backend/optimizer/README
+
+Optimizer
+=========
+
+These directories take the Query structure returned by the parser, and
+generate a plan used by the executor. The /plan directory generates the
+actual output plan, the /path code generates all possible ways to join the
+tables, and /prep handles various preprocessing steps for special cases.
+/util is utility stuff. /geqo is the separate "genetic optimization" planner
+--- it does a semi-random search through the join tree space, rather than
+exhaustively considering all possible join trees. (But each join considered
+by /geqo is given to /path to create paths for, so we consider all possible
+implementation paths for each specific join pair even in GEQO mode.)
+
+
+Paths and Join Pairs
+--------------------
+
+During the planning/optimizing process, we build "Path" trees representing
+the different ways of doing a query. We select the cheapest Path that
+generates the desired relation and turn it into a Plan to pass to the
+executor. (There is pretty nearly a one-to-one correspondence between the
+Path and Plan trees, but Path nodes omit info that won't be needed during
+planning, and include info needed for planning that won't be needed by the
+executor.)
+
+The optimizer builds a RelOptInfo structure for each base relation used in
+the query. Base rels are either primitive tables, or subquery subselects
+that are planned via a separate recursive invocation of the planner. A
+RelOptInfo is also built for each join relation that is considered during
+planning. A join rel is simply a combination of base rels. There is only
+one join RelOptInfo for any given set of baserels --- for example, the join
+{A B C} is represented by the same RelOptInfo no matter whether we build it
+by joining A and B first and then adding C, or joining B and C first and
+then adding A, etc. These different means of building the joinrel are
+represented as Paths. For each RelOptInfo we build a list of Paths that
+represent plausible ways to implement the scan or join of that relation.
+Once we've considered all the plausible Paths for a rel, we select the one
+that is cheapest according to the planner's cost estimates. The final plan
+is derived from the cheapest Path for the RelOptInfo that includes all the
+base rels of the query.
+
+Possible Paths for a primitive table relation include plain old sequential
+scan, plus index scans for any indexes that exist on the table, plus bitmap
+index scans using one or more indexes. Specialized RTE types, such as
+function RTEs, may have only one possible Path.
+
+Joins always occur using two RelOptInfos. One is outer, the other inner.
+Outers drive lookups of values in the inner. In a nested loop, lookups of
+values in the inner occur by scanning the inner path once per outer tuple
+to find each matching inner row. In a mergejoin, inner and outer rows are
+ordered, and are accessed in order, so only one scan is required to perform
+the entire join: both inner and outer paths are scanned in-sync. (There's
+not a lot of difference between inner and outer in a mergejoin...) In a
+hashjoin, the inner is scanned first and all its rows are entered in a
+hashtable, then the outer is scanned and for each row we lookup the join
+key in the hashtable.
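+
+(As a side illustration, not part of the planner code itself: for a simple
+two-table join on hypothetical tables tab1 and tab2, each of these join
+methods can usually be observed by disabling the others, e.g.
+
+    SET enable_hashjoin = off;
+    SET enable_mergejoin = off;
+    EXPLAIN SELECT * FROM tab1 JOIN tab2 ON tab1.col = tab2.col;
+
+The plan actually chosen depends on table sizes, indexes, and statistics,
+so this is only a way to peek at the alternatives the planner considers.)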
+
+A Path for a join relation is actually a tree structure, with the topmost
+Path node representing the last-applied join method. It has left and right
+subpaths that represent the scan or join methods used for the two input
+relations.
+
+
+Join Tree Construction
+----------------------
+
+The optimizer generates optimal query plans by doing a more-or-less
+exhaustive search through the ways of executing the query. The best Path
+tree is found by a recursive process:
+
+1) Take each base relation in the query, and make a RelOptInfo structure
+for it. Find each potentially useful way of accessing the relation,
+including sequential and index scans, and make Paths representing those
+ways. All the Paths made for a given relation are placed in its
+RelOptInfo.pathlist. (Actually, we discard Paths that are obviously
+inferior alternatives before they ever get into the pathlist --- what
+ends up in the pathlist is the cheapest way of generating each potentially
+useful sort ordering and parameterization of the relation.) Also create a
+RelOptInfo.joininfo list including all the join clauses that involve this
+relation. For example, the WHERE clause "tab1.col1 = tab2.col1" generates
+entries in both tab1 and tab2's joininfo lists.
+
+If we have only a single base relation in the query, we are done.
+Otherwise we have to figure out how to join the base relations into a
+single join relation.
+
+2) Normally, any explicit JOIN clauses are "flattened" so that we just
+have a list of relations to join. However, FULL OUTER JOIN clauses are
+never flattened, and other kinds of JOIN might not be either, if the
+flattening process is stopped by join_collapse_limit or from_collapse_limit
+restrictions. Therefore, we end up with a planning problem that contains
+lists of relations to be joined in any order, where any individual item
+might be a sub-list that has to be joined together before we can consider
+joining it to its siblings. We process these sub-problems recursively,
+bottom up. Note that the join list structure constrains the possible join
+orders, but it doesn't constrain the join implementation method at each
+join (nestloop, merge, hash), nor does it say which rel is considered outer
+or inner at each join. We consider all these possibilities in building
+Paths. We generate a Path for each feasible join method, and select the
+cheapest Path.
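+
+(A hedged illustration with hypothetical tables a, b, c: lowering
+join_collapse_limit keeps explicit JOIN syntax from being flattened, so
+
+    SET join_collapse_limit = 1;
+    EXPLAIN SELECT *
+    FROM a JOIN b ON a.x = b.x
+           JOIN c ON b.y = c.y;
+
+is expected to be planned in the syntactic join order, whereas with the
+default setting the JOINs are collapsed into a single join list and all
+legal join orders compete on cost.)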
+
+For each planning problem, therefore, we will have a list of relations
+that are either base rels or joinrels constructed per sub-join-lists.
+We can join these rels together in any order the planner sees fit.
+The standard (non-GEQO) planner does this as follows:
+
+Consider joining each RelOptInfo to each other RelOptInfo for which there
+is a usable joinclause, and generate a Path for each possible join method
+for each such pair. (If we have a RelOptInfo with no join clauses, we have
+no choice but to generate a clauseless Cartesian-product join; so we
+consider joining that rel to each other available rel. But in the presence
+of join clauses we will only consider joins that use available join
+clauses. Note that join-order restrictions induced by outer joins and
+IN/EXISTS clauses are also checked, to ensure that we find a workable join
+order in cases where those restrictions force a clauseless join to be done.)
+
+If we only had two relations in the list, we are done: we just pick
+the cheapest path for the join RelOptInfo. If we had more than two, we now
+need to consider ways of joining join RelOptInfos to each other to make
+join RelOptInfos that represent more than two list items.
+
+The join tree is constructed using a "dynamic programming" algorithm:
+in the first pass (already described) we consider ways to create join rels
+representing exactly two list items. The second pass considers ways
+to make join rels that represent exactly three list items; the next pass,
+four items, etc. The last pass considers how to make the final join
+relation that includes all list items --- obviously there can be only one
+join rel at this top level, whereas there can be more than one join rel
+at lower levels. At each level we use joins that follow available join
+clauses, if possible, just as described for the first level.
+
+For example:
+
+ SELECT *
+ FROM tab1, tab2, tab3, tab4
+ WHERE tab1.col = tab2.col AND
+ tab2.col = tab3.col AND
+ tab3.col = tab4.col
+
+ Tables 1, 2, 3, and 4 are joined as:
+ {1 2},{2 3},{3 4}
+ {1 2 3},{2 3 4}
+ {1 2 3 4}
+ (other possibilities will be excluded for lack of join clauses)
+
+ SELECT *
+ FROM tab1, tab2, tab3, tab4
+ WHERE tab1.col = tab2.col AND
+ tab1.col = tab3.col AND
+ tab1.col = tab4.col
+
+ Tables 1, 2, 3, and 4 are joined as:
+ {1 2},{1 3},{1 4}
+ {1 2 3},{1 3 4},{1 2 4}
+ {1 2 3 4}
+
+We consider left-handed plans (the outer rel of an upper join is a joinrel,
+but the inner is always a single list item); right-handed plans (outer rel
+is always a single item); and bushy plans (both inner and outer can be
+joins themselves). For example, when building {1 2 3 4} we consider
+joining {1 2 3} to {4} (left-handed), {4} to {1 2 3} (right-handed), and
+{1 2} to {3 4} (bushy), among other choices. Although the jointree
+scanning code produces these potential join combinations one at a time,
+all the ways to produce the same set of joined base rels will share the
+same RelOptInfo, so the paths produced from different join combinations
+that produce equivalent joinrels will compete in add_path().
+
+The dynamic-programming approach has an important property that's not
+immediately obvious: we will finish constructing all paths for a given
+relation before we construct any paths for relations containing that rel.
+This means that we can reliably identify the "cheapest path" for each rel
+before higher-level relations need to know that. Also, we can safely
+discard a path when we find that another path for the same rel is better,
+without worrying that maybe there is already a reference to that path in
+some higher-level join path. Without this, memory management for paths
+would be much more complicated.
+
+Once we have built the final join rel, we use either the cheapest path
+for it or the cheapest path with the desired ordering (if that's cheaper
+than applying a sort to the cheapest other path).
+
+If the query contains one-sided outer joins (LEFT or RIGHT joins), or
+IN or EXISTS WHERE clauses that were converted to semijoins or antijoins,
+then some of the possible join orders may be illegal. These are excluded
+by having join_is_legal consult a side list of such "special" joins to see
+whether a proposed join is illegal. (The same consultation allows it to
+see which join style should be applied for a valid join, ie, JOIN_INNER,
+JOIN_LEFT, etc.)
+
+
+Valid OUTER JOIN Optimizations
+------------------------------
+
+The planner's treatment of outer join reordering is based on the following
+identities:
+
+1. (A leftjoin B on (Pab)) innerjoin C on (Pac)
+ = (A innerjoin C on (Pac)) leftjoin B on (Pab)
+
+where Pac is a predicate referencing A and C, etc (in this case, clearly
+Pac cannot reference B, or the transformation is nonsensical).
+
+2. (A leftjoin B on (Pab)) leftjoin C on (Pac)
+ = (A leftjoin C on (Pac)) leftjoin B on (Pab)
+
+3. (A leftjoin B on (Pab)) leftjoin C on (Pbc)
+ = A leftjoin (B leftjoin C on (Pbc)) on (Pab)
+
+Identity 3 only holds if predicate Pbc must fail for all-null B rows
+(that is, Pbc is strict for at least one column of B). If Pbc is not
+strict, the first form might produce some rows with nonnull C columns
+where the second form would make those entries null.
+
+RIGHT JOIN is equivalent to LEFT JOIN after switching the two input
+tables, so the same identities work for right joins.
+
+An example of a case that does *not* work is moving an innerjoin into or
+out of the nullable side of an outer join:
+
+ A leftjoin (B join C on (Pbc)) on (Pab)
+ != (A leftjoin B on (Pab)) join C on (Pbc)
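+
+A tiny concrete case (hypothetical one-row inputs) shows the difference.
+With A = {1}, B = {2}, C = {3}, Pab being a.x = b.x and Pbc being
+b.x = c.y:
+
+    SELECT * FROM (VALUES (1)) a(x)
+      LEFT JOIN ((VALUES (2)) b(x) JOIN (VALUES (3)) c(y) ON b.x = c.y)
+      ON a.x = b.x;                          -- one row: (1, NULL, NULL)
+
+    SELECT * FROM ((VALUES (1)) a(x)
+      LEFT JOIN (VALUES (2)) b(x) ON a.x = b.x)
+      JOIN (VALUES (3)) c(y) ON b.x = c.y;   -- no rows
+
+The first form preserves the unmatched A row; the second discards it when
+the inner join to C is applied after null-extension.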
+
+SEMI joins work a little bit differently. A semijoin can be reassociated
+into or out of the lefthand side of another semijoin, left join, or
+antijoin, but not into or out of the righthand side. Likewise, an inner
+join, left join, or antijoin can be reassociated into or out of the
+lefthand side of a semijoin, but not into or out of the righthand side.
+
+ANTI joins work approximately like LEFT joins, except that identity 3
+fails if the join to C is an antijoin (even if Pbc is strict, and in
+both the cases where the other join is a leftjoin and where it is an
+antijoin). So we can't reorder antijoins into or out of the RHS of a
+leftjoin or antijoin, even if the relevant clause is strict.
+
+The current code does not attempt to re-order FULL JOINs at all.
+FULL JOIN ordering is enforced by not collapsing FULL JOIN nodes when
+translating the jointree to "joinlist" representation. Other types of
+JOIN nodes are normally collapsed so that they participate fully in the
+join order search. To avoid generating illegal join orders, the planner
+creates a SpecialJoinInfo node for each non-inner join, and join_is_legal
+checks this list to decide if a proposed join is legal.
+
+What we store in SpecialJoinInfo nodes are the minimum sets of Relids
+required on each side of the join to form the outer join. Note that
+these are minimums; there's no explicit maximum, since joining other
+rels to the OJ's syntactic rels may be legal. Per identities 1 and 2,
+non-FULL joins can be freely associated into the lefthand side of an
+OJ, but in some cases they can't be associated into the righthand side.
+So the restriction enforced by join_is_legal is that a proposed join
+can't join a rel within or partly within an RHS boundary to one outside
+the boundary, unless the proposed join is a LEFT join that can associate
+into the SpecialJoinInfo's RHS using identity 3.
+
+The use of minimum Relid sets has some pitfalls; consider a query like
+ A leftjoin (B leftjoin (C innerjoin D) on (Pbcd)) on Pa
+where Pa doesn't mention B/C/D at all. In this case a naive computation
+would give the upper leftjoin's min LHS as {A} and min RHS as {C,D} (since
+we know that the innerjoin can't associate out of the leftjoin's RHS, and
+enforce that by including its relids in the leftjoin's min RHS). And the
+lower leftjoin has min LHS of {B} and min RHS of {C,D}. Given such
+information, join_is_legal would think it's okay to associate the upper
+join into the lower join's RHS, transforming the query to
+ B leftjoin (A leftjoin (C innerjoin D) on Pa) on (Pbcd)
+which yields totally wrong answers. We prevent that by forcing the min RHS
+for the upper join to include B. This is perhaps overly restrictive, but
+such cases don't arise often so it's not clear that it's worth developing a
+more complicated system.
+
+
+Pulling Up Subqueries
+---------------------
+
+As we described above, a subquery appearing in the range table is planned
+independently and treated as a "black box" during planning of the outer
+query. This is necessary when the subquery uses features such as
+aggregates, GROUP, or DISTINCT. But if the subquery is just a simple
+scan or join, treating the subquery as a black box may produce a poor plan
+compared to considering it as part of the entire plan search space.
+Therefore, at the start of the planning process the planner looks for
+simple subqueries and pulls them up into the main query's jointree.
+
+Pulling up a subquery may result in FROM-list joins appearing below the top
+of the join tree. Each FROM-list is planned using the dynamic-programming
+search method described above.
+
+If pulling up a subquery produces a FROM-list as a direct child of another
+FROM-list, then we can merge the two FROM-lists together. Once that's
+done, the subquery is an absolutely integral part of the outer query and
+will not constrain the join tree search space at all. However, that could
+result in unpleasant growth of planning time, since the dynamic-programming
+search has runtime exponential in the number of FROM-items considered.
+Therefore, we don't merge FROM-lists if the result would have too many
+FROM-items in one list.
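+
+For example (hypothetical tables), a subquery as simple as
+
+    SELECT * FROM tab1,
+         (SELECT * FROM tab2 WHERE tab2.flag) ss
+    WHERE tab1.a = ss.b;
+
+can be pulled up, letting tab1 and tab2 participate in the join search as
+ordinary base rels; if the subquery instead contained GROUP BY or
+aggregates, it would be planned separately and joined to tab1 as an
+opaque unit.  EXPLAIN can usually be used to see which of the two
+treatments the planner applied.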
+
+
+Optimizer Functions
+-------------------
+
+The primary entry point is planner().
+
+planner()
+set up for recursive handling of subqueries
+-subquery_planner()
+ pull up sublinks and subqueries from rangetable, if possible
+ canonicalize qual
+ Attempt to simplify WHERE clause to the most useful form; this includes
+ flattening nested AND/ORs and detecting clauses that are duplicated in
+ different branches of an OR.
+ simplify constant expressions
+ process sublinks
+ convert Vars of outer query levels into Params
+--grouping_planner()
+ preprocess target list for non-SELECT queries
+ handle UNION/INTERSECT/EXCEPT, GROUP BY, HAVING, aggregates,
+ ORDER BY, DISTINCT, LIMIT
+---query_planner()
+ make list of base relations used in query
+ split up the qual into restrictions (a=1) and joins (b=c)
+ find qual clauses that enable merge and hash joins
+----make_one_rel()
+ set_base_rel_pathlists()
+ find seqscan and all index paths for each base relation
+ find selectivity of columns used in joins
+ make_rel_from_joinlist()
+ hand off join subproblems to a plugin, GEQO, or standard_join_search()
+------standard_join_search()
+ call join_search_one_level() for each level of join tree needed
+ join_search_one_level():
+ For each joinrel of the prior level, do make_rels_by_clause_joins()
+ if it has join clauses, or make_rels_by_clauseless_joins() if not.
+ Also generate "bushy plan" joins between joinrels of lower levels.
+ Back at standard_join_search(), generate gather paths if needed for
+ each newly constructed joinrel, then apply set_cheapest() to extract
+ the cheapest path for it.
+ Loop back if this wasn't the top join level.
+ Back at grouping_planner:
+ do grouping (GROUP BY) and aggregation
+ do window functions
+ make unique (DISTINCT)
+ do sorting (ORDER BY)
+ do limit (LIMIT/OFFSET)
+Back at planner():
+convert finished Path tree into a Plan tree
+do final cleanup after planning
+
+
+Optimizer Data Structures
+-------------------------
+
+PlannerGlobal - global information for a single planner invocation
+
+PlannerInfo - information for planning a particular Query (we make
+ a separate PlannerInfo node for each sub-Query)
+
+RelOptInfo - a relation or joined relations
+
+ RestrictInfo - WHERE clauses, like "x = 3" or "y = z"
+ (note the same structure is used for restriction and
+ join clauses)
+
+ Path - every way to generate a RelOptInfo(sequential,index,joins)
+ A plain Path node can represent several simple plans, per its pathtype:
+ T_SeqScan - sequential scan
+ T_SampleScan - tablesample scan
+ T_FunctionScan - function-in-FROM scan
+ T_TableFuncScan - table function scan
+ T_ValuesScan - VALUES scan
+ T_CteScan - CTE (WITH) scan
+ T_NamedTuplestoreScan - ENR scan
+ T_WorkTableScan - scan worktable of a recursive CTE
+ T_Result - childless Result plan node (used for FROM-less SELECT)
+ IndexPath - index scan
+ BitmapHeapPath - top of a bitmapped index scan
+ TidPath - scan by CTID
+ TidRangePath - scan a contiguous range of CTIDs
+ SubqueryScanPath - scan a subquery-in-FROM
+ ForeignPath - scan a foreign table, foreign join or foreign upper-relation
+ CustomPath - for custom scan providers
+ AppendPath - append multiple subpaths together
+ MergeAppendPath - merge multiple subpaths, preserving their common sort order
+ GroupResultPath - childless Result plan node (used for degenerate grouping)
+ MaterialPath - a Material plan node
+ MemoizePath - a Memoize plan node for caching tuples from sub-paths
+ UniquePath - remove duplicate rows (either by hashing or sorting)
+ GatherPath - collect the results of parallel workers
+ GatherMergePath - collect parallel results, preserving their common sort order
+ ProjectionPath - a Result plan node with child (used for projection)
+ ProjectSetPath - a ProjectSet plan node applied to some sub-path
+ SortPath - a Sort plan node applied to some sub-path
+ IncrementalSortPath - an IncrementalSort plan node applied to some sub-path
+ GroupPath - a Group plan node applied to some sub-path
+ UpperUniquePath - a Unique plan node applied to some sub-path
+ AggPath - an Agg plan node applied to some sub-path
+ GroupingSetsPath - an Agg plan node used to implement GROUPING SETS
+ MinMaxAggPath - a Result plan node with subplans performing MIN/MAX
+ WindowAggPath - a WindowAgg plan node applied to some sub-path
+ SetOpPath - a SetOp plan node applied to some sub-path
+ RecursiveUnionPath - a RecursiveUnion plan node applied to two sub-paths
+ LockRowsPath - a LockRows plan node applied to some sub-path
+ ModifyTablePath - a ModifyTable plan node applied to some sub-path(s)
+ LimitPath - a Limit plan node applied to some sub-path
+ NestPath - nested-loop joins
+ MergePath - merge joins
+ HashPath - hash joins
+
+ EquivalenceClass - a data structure representing a set of values known equal
+
+ PathKey - a data structure representing the sort ordering of a path
+
+The optimizer spends a good deal of its time worrying about the ordering
+of the tuples returned by a path. The reason this is useful is that by
+knowing the sort ordering of a path, we may be able to use that path as
+the left or right input of a mergejoin and avoid an explicit sort step.
+Nestloops and hash joins don't really care what the order of their inputs
+is, but mergejoin needs suitably ordered inputs. Therefore, all paths
+generated during the optimization process are marked with their sort order
+(to the extent that it is known) for possible use by a higher-level merge.
+
+It is also possible to avoid an explicit sort step to implement a user's
+ORDER BY clause if the final path has the right ordering already, so the
+sort ordering is of interest even at the top level. grouping_planner() will
+look for the cheapest path with a sort order matching the desired order,
+then compare its cost to the cost of using the cheapest-overall path and
+doing an explicit sort on that.
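+
+For instance (hypothetical table and index), given
+
+    CREATE TABLE t (k int, v text);
+    CREATE INDEX t_k_idx ON t (k);
+
+a query "SELECT * FROM t ORDER BY k" can be implemented either by scanning
+t_k_idx (already ordered, but with extra heap accesses) or by a sequential
+scan followed by an explicit Sort; which plan EXPLAIN shows depends on the
+table's size and statistics, reflecting exactly this cost comparison.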
+
+When we are generating paths for a particular RelOptInfo, we discard a path
+if it is more expensive than another known path that has the same or better
+sort order. We will never discard a path that is the only known way to
+achieve a given sort order (without an explicit sort, that is). In this
+way, the next level up will have the maximum freedom to build mergejoins
+without sorting, since it can pick from any of the paths retained for its
+inputs.
+
+
+EquivalenceClasses
+------------------
+
+During the deconstruct_jointree() scan of the query's qual clauses, we look
+for mergejoinable equality clauses A = B whose applicability is not delayed
+by an outer join; these are called "equivalence clauses". When we find
+one, we create an EquivalenceClass containing the expressions A and B to
+record this knowledge. If we later find another equivalence clause B = C,
+we add C to the existing EquivalenceClass for {A B}; this may require
+merging two existing EquivalenceClasses. At the end of the scan, we have
+sets of values that are known all transitively equal to each other. We can
+therefore use a comparison of any pair of the values as a restriction or
+join clause (when these values are available at the scan or join, of
+course); furthermore, we need test only one such comparison, not all of
+them. Therefore, equivalence clauses are removed from the standard qual
+distribution process. Instead, when preparing a restriction or join clause
+list, we examine each EquivalenceClass to see if it can contribute a
+clause, and if so we select an appropriate pair of values to compare. For
+example, if we are trying to join A's relation to C's, we can generate the
+clause A = C, even though this appeared nowhere explicitly in the original
+query. This may allow us to explore join paths that otherwise would have
+been rejected as requiring Cartesian-product joins.
+
+Sometimes an EquivalenceClass may contain a pseudo-constant expression
+(i.e., one not containing Vars or Aggs of the current query level, nor
+volatile functions). In this case we do not follow the policy of
+dynamically generating join clauses: instead, we dynamically generate
+restriction clauses "var = const" wherever one of the variable members of
+the class can first be computed. For example, if we have A = B and B = 42,
+we effectively generate the restriction clauses A = 42 and B = 42, and then
+we need not bother with explicitly testing the join clause A = B when the
+relations are joined. In effect, all the class members can be tested at
+relation-scan level and there's never a need for join tests.
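+
+For example (hypothetical tables a and b), in
+
+    SELECT * FROM a, b WHERE a.x = b.y AND b.y = 42;
+
+the EquivalenceClass {a.x b.y 42} lets the planner apply a.x = 42 while
+scanning a and b.y = 42 while scanning b, and the join clause a.x = b.y
+need not be rechecked at the join; EXPLAIN will typically show the derived
+constants as scan-level filters or index conditions.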
+
+The precise technical interpretation of an EquivalenceClass is that it
+asserts that at any plan node where more than one of its member values
+can be computed, output rows in which the values are not all equal may
+be discarded without affecting the query result. (We require all levels
+of the plan to enforce EquivalenceClasses, hence a join need not recheck
+equality of values that were computable by one of its children.) For an
+ordinary EquivalenceClass that is "valid everywhere", we can further infer
+that the values are all non-null, because all mergejoinable operators are
+strict. However, we also allow equivalence clauses that appear below the
+nullable side of an outer join to form EquivalenceClasses; for these
+classes, the interpretation is that either all the values are equal, or
+all (except pseudo-constants) have gone to null. (This requires a
+limitation that non-constant members be strict, else they might not go
+to null when the other members do.) Consider for example
+
+ SELECT *
+ FROM a LEFT JOIN
+ (SELECT * FROM b JOIN c ON b.y = c.z WHERE b.y = 10) ss
+ ON a.x = ss.y
+ WHERE a.x = 42;
+
+We can form the below-outer-join EquivalenceClass {b.y c.z 10} and thereby
+apply c.z = 10 while scanning c. (The reason we disallow outerjoin-delayed
+clauses from forming EquivalenceClasses is exactly that we want to be able
+to push any derived clauses as far down as possible.) But once above the
+outer join it's no longer necessarily the case that b.y = 10, and thus we
+cannot use such EquivalenceClasses to conclude that sorting is unnecessary
+(see discussion of PathKeys below).
+
+In this example, notice also that a.x = ss.y (really a.x = b.y) is not an
+equivalence clause because its applicability to b is delayed by the outer
+join; thus we do not try to insert b.y into the equivalence class {a.x 42}.
+But since we see that a.x has been equated to 42 above the outer join, we
+are able to form a below-outer-join class {b.y 42}; this restriction can be
+added because no b/c row not having b.y = 42 can contribute to the result
+of the outer join, and so we need not compute such rows. Now this class
+will get merged with {b.y c.z 10}, leading to the contradiction 10 = 42,
+which lets the planner deduce that the b/c join need not be computed at all
+because none of its rows can contribute to the outer join. (This gets
+implemented as a gating Result filter, since more usually the potential
+contradiction involves Param values rather than just Consts, and thus has
+to be checked at runtime.)
+
+To aid in determining the sort ordering(s) that can work with a mergejoin,
+we mark each mergejoinable clause with the EquivalenceClasses of its left
+and right inputs. For an equivalence clause, these are of course the same
+EquivalenceClass. For a non-equivalence mergejoinable clause (such as an
+outer-join qualification), we generate two separate EquivalenceClasses for
+the left and right inputs. This may result in creating single-item
+equivalence "classes", though of course these are still subject to merging
+if other equivalence clauses are later found to bear on the same
+expressions.
+
+Another way that we may form a single-item EquivalenceClass is in creation
+of a PathKey to represent a desired sort order (see below). This is a bit
+different from the above cases because such an EquivalenceClass might
+contain an aggregate function or volatile expression. (A clause containing
+a volatile function will never be considered mergejoinable, even if its top
+operator is mergejoinable, so there is no way for a volatile expression to
+get into EquivalenceClasses otherwise. Aggregates are disallowed in WHERE
+altogether, so will never be found in a mergejoinable clause.) This is just
+a convenience to maintain a uniform PathKey representation: such an
+EquivalenceClass will never be merged with any other. Note in particular
+that a single-item EquivalenceClass {a.x} is *not* meant to imply an
+assertion that a.x = a.x; the practical effect of this is that a.x could
+be NULL.
+
+An EquivalenceClass also contains a list of btree opfamily OIDs, which
+determines what the equalities it represents actually "mean". All the
+equivalence clauses that contribute to an EquivalenceClass must have
+equality operators that belong to the same set of opfamilies. (Note: most
+of the time, a particular equality operator belongs to only one family, but
+it's possible that it belongs to more than one. We keep track of all the
+families to ensure that we can make use of an index belonging to any one of
+the families for mergejoin purposes.)
+
+An EquivalenceClass can contain "em_is_child" members, which are copies
+of members that contain appendrel parent relation Vars, transposed to
+contain the equivalent child-relation variables or expressions. These
+members are *not* full-fledged members of the EquivalenceClass and do not
+affect the class's overall properties at all. They are kept only to
+simplify matching of child-relation expressions to EquivalenceClasses.
+Most operations on EquivalenceClasses should ignore child members.
+
+
+PathKeys
+--------
+
+The PathKeys data structure represents what is known about the sort order
+of the tuples generated by a particular Path. A path's pathkeys field is a
+list of PathKey nodes, where the n'th item represents the n'th sort key of
+the result. Each PathKey contains these fields:
+
+ * a reference to an EquivalenceClass
+ * a btree opfamily OID (must match one of those in the EC)
+ * a sort direction (ascending or descending)
+ * a nulls-first-or-last flag
+
+The EquivalenceClass represents the value being sorted on. Since the
+various members of an EquivalenceClass are known equal according to the
+opfamily, we can consider a path sorted by any one of them to be sorted by
+any other too; this is what justifies referencing the whole
+EquivalenceClass rather than just one member of it.
+
+In single/base relation RelOptInfo's, the Paths represent various ways
+of scanning the relation and the resulting ordering of the tuples.
+Sequential scan Paths have NIL pathkeys, indicating no known ordering.
+Index scans have Path.pathkeys that represent the chosen index's ordering,
+if any. A single-key index would create a single-PathKey list, while a
+multi-column index generates a list with one element per key index column.
+Non-key columns specified in the INCLUDE clause of covering indexes don't
+have corresponding PathKeys in the list, because they have no influence on
+index ordering. (Actually, since an index can be scanned either forward or
+backward, there are two possible sort orders and two possible PathKey lists
+it can generate.)
+
+Note that a bitmap scan has NIL pathkeys since we can say nothing about
+the overall order of its result. Also, an indexscan on an unordered type
+of index generates NIL pathkeys. However, we can always create a pathkey
+by doing an explicit sort. The pathkeys for a Sort plan's output just
+represent the sort key fields and the ordering operators used.
+
+Things get more interesting when we consider joins. Suppose we do a
+mergejoin between A and B using the mergeclause A.X = B.Y. The output
+of the mergejoin is sorted by X --- but it is also sorted by Y. Again,
+this can be represented by a PathKey referencing an EquivalenceClass
+containing both X and Y.
+
+With a little further thought, it becomes apparent that nestloop joins
+can also produce sorted output. For example, if we do a nestloop join
+between outer relation A and inner relation B, then any pathkeys relevant
+to A are still valid for the join result: we have not altered the order of
+the tuples from A. Even more interesting, if there was an equivalence clause
+A.X=B.Y, and A.X was a pathkey for the outer relation A, then we can assert
+that B.Y is a pathkey for the join result; X was ordered before and still
+is, and the joined values of Y are equal to the joined values of X, so Y
+must now be ordered too. This is true even though we used neither an
+explicit sort nor a mergejoin on Y. (Note: hash joins cannot be counted
+on to preserve the order of their outer relation, because the executor
+might decide to "batch" the join, so we always set pathkeys to NIL for
+a hashjoin path.) Exception: a RIGHT or FULL join doesn't preserve the
+ordering of its outer relation, because it might insert nulls at random
+points in the ordering.
+
+In general, we can justify using EquivalenceClasses as the basis for
+pathkeys because, whenever we scan a relation containing multiple
+EquivalenceClass members or join two relations each containing
+EquivalenceClass members, we apply restriction or join clauses derived from
+the EquivalenceClass. This guarantees that any two values listed in the
+EquivalenceClass are in fact equal in all tuples emitted by the scan or
+join, and therefore that if the tuples are sorted by one of the values,
+they can be considered sorted by any other as well. It does not matter
+whether the test clause is used as a mergeclause, or merely enforced
+after-the-fact as a qpqual filter.
+
+Note that there is no particular difficulty in labeling a path's sort
+order with a PathKey referencing an EquivalenceClass that contains
+variables not yet joined into the path's output. We can simply ignore
+such entries as not being relevant (yet). This makes it possible to
+use the same EquivalenceClasses throughout the join planning process.
+In fact, by being careful not to generate multiple identical PathKey
+objects, we can reduce comparison of EquivalenceClasses and PathKeys
+to simple pointer comparison, which is a huge savings because add_path
+has to make a large number of PathKey comparisons in deciding whether
+competing Paths are equivalently sorted.
+
+Pathkeys are also useful to represent an ordering that we wish to achieve,
+since they are easily compared to the pathkeys of a potential candidate
+path. So, SortGroupClause lists are turned into pathkeys lists for use
+inside the optimizer.
+
+An additional refinement we can make is to insist that canonical pathkey
+lists (sort orderings) do not mention the same EquivalenceClass more than
+once. For example, in all these cases the second sort column is redundant,
+because it cannot distinguish values that are the same according to the
+first sort column:
+ SELECT ... ORDER BY x, x
+ SELECT ... ORDER BY x, x DESC
+ SELECT ... WHERE x = y ORDER BY x, y
+Although a user probably wouldn't write "ORDER BY x,x" directly, such
+redundancies are more probable once equivalence classes have been
+considered. Also, the system may generate redundant pathkey lists when
+computing the sort ordering needed for a mergejoin. By eliminating the
+redundancy, we save time and improve planning, since the planner will more
+easily recognize equivalent orderings as being equivalent.
+
+Another interesting property is that if the underlying EquivalenceClass
+contains a constant and is not below an outer join, then the pathkey is
+completely redundant and need not be sorted by at all! Every row must
+contain the same constant value, so there's no need to sort. (If the EC is
+below an outer join, we still have to sort, since some of the rows might
+have gone to null and others not. In this case we must be careful to pick
+a non-const member to sort by. The assumption that all the non-const
+members go to null at the same plan level is critical here, else they might
+not produce the same sort order.) This might seem pointless because users
+are unlikely to write "... WHERE x = 42 ORDER BY x", but it allows us to
+recognize when particular index columns are irrelevant to the sort order:
+if we have "... WHERE x = 42 ORDER BY y", scanning an index on (x,y)
+produces correctly ordered data without a sort step. We used to have very
+ugly ad-hoc code to recognize that in limited contexts, but discarding
+constant ECs from pathkeys makes it happen cleanly and automatically.
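+
+Concretely (hypothetical table and index):
+
+    CREATE TABLE t (x int, y int);
+    CREATE INDEX t_x_y_idx ON t (x, y);
+
+    SELECT * FROM t WHERE x = 42 ORDER BY y;
+
+Since the constant EquivalenceClass {x 42} makes the leading pathkey
+redundant, a scan of t_x_y_idx yields the rows already ordered by y and no
+Sort node is needed; whether that path is actually chosen still depends on
+the usual cost comparison.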
+
+You might object that a below-outer-join EquivalenceClass doesn't always
+represent the same values at every level of the join tree, and so using
+it to uniquely identify a sort order is dubious. This is true, but we
+can avoid dealing with the fact explicitly because we always consider that
+an outer join destroys any ordering of its nullable inputs. Thus, even
+if a path was sorted by {a.x} below an outer join, we'll re-sort if that
+sort ordering was important; and so using the same PathKey for both sort
+orderings doesn't create any real problem.
+
+
+Order of processing for EquivalenceClasses and PathKeys
+-------------------------------------------------------
+
+As alluded to above, there is a specific sequence of phases in the
+processing of EquivalenceClasses and PathKeys during planning. During the
+initial scanning of the query's quals (deconstruct_jointree followed by
+reconsider_outer_join_clauses), we construct EquivalenceClasses based on
+mergejoinable clauses found in the quals. At the end of this process,
+we know all we can know about equivalence of different variables, so
+subsequently there will be no further merging of EquivalenceClasses.
+At that point it is possible to consider the EquivalenceClasses as
+"canonical" and build canonical PathKeys that reference them. At this
+time we construct PathKeys for the query's ORDER BY and related clauses.
+(Any ordering expressions that do not appear elsewhere will result in
+the creation of new EquivalenceClasses, but this cannot result in merging
+existing classes, so canonical-ness is not lost.)
+
+Because all the EquivalenceClasses are known before we begin path
+generation, we can use them as a guide to which indexes are of interest:
+if an index's column is not mentioned in any EquivalenceClass then that
+index's sort order cannot possibly be helpful for the query. This allows
+short-circuiting of much of the processing of create_index_paths() for
+irrelevant indexes.
+
+There are some cases where planner.c constructs additional
+EquivalenceClasses and PathKeys after query_planner has completed.
+In these cases, the extra ECs/PKs are needed to represent sort orders
+that were not considered during query_planner. Such situations should be
+minimized since it is impossible for query_planner to return a plan
+producing such a sort order, meaning an explicit sort will always be needed.
+Currently this happens only for queries involving multiple window functions
+with different orderings, for which extra sorts are needed anyway.
+
+
+Parameterized Paths
+-------------------
+
+The naive way to join two relations using a clause like WHERE A.X = B.Y
+is to generate a nestloop plan like this:
+
+ NestLoop
+ Filter: A.X = B.Y
+ -> Seq Scan on A
+ -> Seq Scan on B
+
+We can make this better by using a merge or hash join, but it still
+requires scanning all of both input relations. If A is very small and B is
+very large, but there is an index on B.Y, it can be enormously better to do
+something like this:
+
+ NestLoop
+ -> Seq Scan on A
+ -> Index Scan using B_Y_IDX on B
+ Index Condition: B.Y = A.X
+
+Here, we are expecting that for each row scanned from A, the nestloop
+plan node will pass down the current value of A.X into the scan of B.
+That allows the indexscan to treat A.X as a constant for any one
+invocation, and thereby use it as an index key. This is the only plan type
+that can avoid fetching all of B, and for small numbers of rows coming from
+A, that will dominate every other consideration. (As A gets larger, this
+gets less attractive, and eventually a merge or hash join will win instead.
+So we have to cost out all the alternatives to decide what to do.)
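+
+A hedged, concrete rendering of that situation (hypothetical tables):
+
+    CREATE TABLE a (x int);                -- small
+    CREATE TABLE b (y int, payload text);  -- large
+    CREATE INDEX b_y_idx ON b (y);
+
+    SELECT * FROM a, b WHERE a.x = b.y;
+
+With statistics showing a to be small, the planner is likely to pick the
+nestloop-with-inner-indexscan shape shown above; as a grows, the cost of
+repeated index probes rises and a hash or merge join eventually wins.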
+
+It can be useful for the parameter value to be passed down through
+intermediate layers of joins, for example:
+
+ NestLoop
+ -> Seq Scan on A
+        -> Hash Join
+ Join Condition: B.Y = C.W
+ -> Seq Scan on B
+ -> Index Scan using C_Z_IDX on C
+ Index Condition: C.Z = A.X
+
+If all joins are plain inner joins then this is usually unnecessary,
+because it's possible to reorder the joins so that a parameter is used
+immediately below the nestloop node that provides it. But in the
+presence of outer joins, such join reordering may not be possible.
+
+Also, the bottom-level scan might require parameters from more than one
+other relation. In principle we could join the other relations first
+so that all the parameters are supplied from a single nestloop level.
+But if those other relations have no join clause in common (which is
+common in star-schema queries for instance), the planner won't consider
+joining them directly to each other. In such a case we need to be able
+to create a plan like
+
+ NestLoop
+ -> Seq Scan on SmallTable1 A
+        -> NestLoop
+ -> Seq Scan on SmallTable2 B
+ -> Index Scan using XYIndex on LargeTable C
+ Index Condition: C.X = A.AID and C.Y = B.BID
+
+so we should be willing to pass down A.AID through a join even though
+there is no join order constraint forcing the plan to look like this.
+
+Before version 9.2, Postgres used ad-hoc methods for planning and
+executing nestloop queries of this kind, and those methods could not
+handle passing parameters down through multiple join levels.
+
+To plan such queries, we now use a notion of a "parameterized path",
+which is a path that makes use of a join clause to a relation that's not
+scanned by the path. In the example two above, we would construct a
+path representing the possibility of doing this:
+
+ -> Index Scan using C_Z_IDX on C
+ Index Condition: C.Z = A.X
+
+This path will be marked as being parameterized by relation A. (Note that
+this is only one of the possible access paths for C; we'd still have a
+plain unparameterized seqscan, and perhaps other possibilities.) The
+parameterization marker does not prevent joining the path to B, so one of
+the paths generated for the joinrel {B C} will represent
+
+ Hash Join
+ Join Condition: B.Y = C.W
+ -> Seq Scan on B
+ -> Index Scan using C_Z_IDX on C
+ Index Condition: C.Z = A.X
+
+This path is still marked as being parameterized by A. When we attempt to
+join {B C} to A to form the complete join tree, such a path can only be
+used as the inner side of a nestloop join: it will be ignored for other
+possible join types. So we will form a join path representing the query
+plan shown above, and it will compete in the usual way with paths built
+from non-parameterized scans.
+
+While all ordinary paths for a particular relation generate the same set
+of rows (since they must all apply the same set of restriction clauses),
+parameterized paths typically generate fewer rows than less-parameterized
+paths, since they have additional clauses to work with. This means we
+must consider the number of rows generated as an additional figure of
+merit. A path that costs more than another, but generates fewer rows,
+must be kept since the smaller number of rows might save work at some
+intermediate join level. (It would not save anything if joined
+immediately to the source of the parameters.)
+
+To keep cost estimation rules relatively simple, we make an implementation
+restriction that all paths for a given relation of the same parameterization
+(i.e., the same set of outer relations supplying parameters) must have the
+same rowcount estimate. This is justified by insisting that each such path
+apply *all* join clauses that are available with the named outer relations.
+Different paths might, for instance, choose different join clauses to use
+as index clauses; but they must then apply any other join clauses available
+from the same outer relations as filter conditions, so that the set of rows
+returned is held constant. This restriction doesn't degrade the quality of
+the finished plan: it amounts to saying that we should always push down
+movable join clauses to the lowest possible evaluation level, which is a
+good thing anyway. The restriction is useful in particular to support
+pre-filtering of join paths in add_path_precheck. Without this rule we
+could never reject a parameterized path in advance of computing its rowcount
+estimate, which would greatly reduce the value of the pre-filter mechanism.
+
+To limit planning time, we have to avoid generating an unreasonably large
+number of parameterized paths. We do this by only generating parameterized
+relation scan paths for index scans, and then only for indexes for which
+suitable join clauses are available. There are also heuristics in join
+planning that try to limit the number of parameterized paths considered.
+
+In particular, there's been a deliberate policy decision to favor hash
+joins over merge joins for parameterized join steps (those occurring below
+a nestloop that provides parameters to the lower join's inputs). While we
+do not ignore merge joins entirely, joinpath.c does not fully explore the
+space of potential merge joins with parameterized inputs. Also, add_path
+treats parameterized paths as having no pathkeys, so that they compete
+only on cost and rowcount; they don't get preference for producing a
+special sort order. This creates additional bias against merge joins,
+since we might discard a path that could have been useful for performing
+a merge without an explicit sort step. Since a parameterized path must
+ultimately be used on the inside of a nestloop, where its sort order is
+uninteresting, these choices do not affect any requirement for the final
+output order of a query --- they only make it harder to use a merge join
+at a lower level. The savings in planning work justifies that.
+
+Similarly, parameterized paths do not normally get preference in add_path
+for having cheap startup cost; that's seldom of much value when on the
+inside of a nestloop, so it seems not worth keeping extra paths solely for
+that. An exception occurs for parameterized paths for the RHS relation of
+a SEMI or ANTI join: in those cases, we can stop the inner scan after the
+first match, so it's primarily startup not total cost that we care about.
+
+
+LATERAL subqueries
+------------------
+
+As of 9.3 we support SQL-standard LATERAL references from subqueries in
+FROM (and also functions in FROM). The planner implements these by
+generating parameterized paths for any RTE that contains lateral
+references. In such cases, *all* paths for that relation will be
+parameterized by at least the set of relations used in its lateral
+references. (And in turn, join relations including such a subquery might
+not have any unparameterized paths.) All the other comments made above for
+parameterized paths still apply, though; in particular, each such path is
+still expected to enforce any join clauses that can be pushed down to it,
+so that all paths of the same parameterization have the same rowcount.
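+
+A typical case (hypothetical tables) is
+
+    SELECT * FROM a,
+         LATERAL (SELECT * FROM b WHERE b.y = a.x LIMIT 3) ss;
+
+Every path for ss is parameterized by a, because ss cannot be computed at
+all without a value for a.x, so the join is expected to be done as a
+nestloop with a on the outside.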
+
+We also allow LATERAL subqueries to be flattened (pulled up into the parent
+query) by the optimizer, but only when this does not introduce lateral
+references into JOIN/ON quals that would refer to relations outside the
+lowest outer join at/above that qual. The semantics of such a qual would
+be unclear. Note that even with this restriction, pullup of a LATERAL
+subquery can result in creating PlaceHolderVars that contain lateral
+references to relations outside their syntactic scope. We still evaluate
+such PHVs at their syntactic location or lower, but the presence of such a
+PHV in the quals or targetlist of a plan node requires that node to appear
+on the inside of a nestloop join relative to the rel(s) supplying the
+lateral reference. (Perhaps now that that stuff works, we could relax the
+pullup restriction?)
+
+
+Security-level constraints on qual clauses
+------------------------------------------
+
+To support row-level security and security-barrier views efficiently,
+we mark qual clauses (RestrictInfo nodes) with a "security_level" field.
+The basic concept is that a qual with a lower security_level must be
+evaluated before one with a higher security_level. This ensures that
+"leaky" quals that might expose sensitive data are not evaluated until
+after the security barrier quals that are supposed to filter out
+security-sensitive rows. However, many qual conditions are "leakproof",
+that is we trust the functions they use to not expose data. To avoid
+unnecessarily inefficient plans, a leakproof qual is not delayed by
+security-level considerations, even if it has a higher syntactic
+security_level than another qual.
+
+In a query that contains no use of RLS or security-barrier views, all
+quals will have security_level zero, so that none of these restrictions
+kick in; we don't even need to check leakproofness of qual conditions.
+
+If there are security-barrier quals, they get security_level zero (and
+possibly higher, if there are multiple layers of barriers). Regular quals
+coming from the query text get a security_level one more than the highest
+level used for barrier quals.
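+
+For illustration (hypothetical objects), with a security-barrier view
+
+    CREATE VIEW phone_numbers WITH (security_barrier) AS
+        SELECT person, phone FROM people WHERE visible;
+
+    SELECT * FROM phone_numbers WHERE suspicious(phone);
+
+the view's qual "visible" gets a lower security_level than the
+user-supplied qual, so unless suspicious() is marked LEAKPROOF it should
+not be evaluated until the "visible" test has filtered the row.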
+
+When new qual clauses are generated by EquivalenceClass processing,
+they must be assigned a security_level. This is trickier than it seems.
+One's first instinct is that it would be safe to use the largest level
+found among the source quals for the EquivalenceClass, but that isn't
+safe at all, because it allows unwanted delays of security-barrier quals.
+Consider a barrier qual "t.x = t.y" plus a query qual "t.x = constant",
+and suppose there is another query qual "leaky_function(t.z)" that
+we mustn't evaluate before the barrier qual has been checked.
+We will have an EC {t.x, t.y, constant} which will lead us to replace
+the EC quals with "t.x = constant AND t.y = constant". (We do not want
+to give up that behavior, either, since the latter condition could allow
+use of an index on t.y, which we would never discover from the original
+quals.) If these generated quals are assigned the same security_level as
+the query quals, then it's possible for the leaky_function qual to be
+evaluated first, allowing leaky_function to see data from rows that
+possibly don't pass the barrier condition.
+
+Instead, our handling of security levels with ECs works like this:
+* Quals are not accepted as source clauses for ECs in the first place
+unless they are leakproof or have security_level zero.
+* EC-derived quals are assigned the minimum (not maximum) security_level
+found among the EC's source clauses.
+* If the maximum security_level found among the EC's source clauses is
+above zero, then the equality operators selected for derived quals must
+be leakproof. When no such operator can be found, the EC is treated as
+"broken" and we fall back to emitting its source clauses without any
+additional derived quals.
+
+These rules together ensure that an untrusted qual clause (one with
+security_level above zero) cannot cause an EC to generate a leaky derived
+clause. This makes it safe to use the minimum not maximum security_level
+for derived clauses. The rules could result in poor plans due to not
+being able to generate derived clauses at all, but the risk of that is
+small in practice because most btree equality operators are leakproof.
+Also, by making exceptions for level-zero quals, we ensure that there is
+no plan degradation when no barrier quals are present.
+
+Once we have security levels assigned to all clauses, enforcement
+of barrier-qual ordering restrictions boils down to two rules:
+
+* Table scan plan nodes must not select quals for early execution
+(for example, use them as index qualifiers in an indexscan) unless
+they are leakproof or have security_level no higher than any other
+qual that is due to be executed at the same plan node. (Use the
+utility function restriction_is_securely_promotable() to check
+whether it's okay to select a qual for early execution.)
+
+* Normal execution of a list of quals must execute them in an order
+that satisfies the same security rule, ie higher security_levels must
+be evaluated later unless leakproof. (This is handled in a single place
+by order_qual_clauses() in createplan.c.)
+
+order_qual_clauses() uses a heuristic to decide exactly what to do with
+leakproof clauses. Normally it sorts clauses by security_level then cost,
+being careful that the sort is stable so that we don't reorder clauses
+without a clear reason. But this could result in a very expensive qual
+being done before a cheaper one that is of higher security_level.
+If the cheaper qual is leaky we have no choice, but if it is leakproof
+we could put it first. We choose to sort leakproof quals as if they
+have security_level zero, but only when their cost is less than 10X
+cpu_operator_cost; that restriction alleviates the opposite problem of
+doing expensive quals first just because they're leakproof.
+
+Additional rules will be needed to support safe handling of join quals
+when there is a mix of security levels among join quals; for example, it
+will be necessary to prevent leaky higher-security-level quals from being
+evaluated at a lower join level than other quals of lower security level.
+Currently there is no need to consider that since security-prioritized
+quals can only be single-table restriction quals coming from RLS policies
+or security-barrier views, and security-barrier view subqueries are never
+flattened into the parent query. Hence enforcement of security-prioritized
+quals only happens at the table scan level. With extra rules for safe
+handling of security levels among join quals, it should be possible to let
+security-barrier views be flattened into the parent query, allowing more
+flexibility of planning while still preserving required ordering of qual
+evaluation. But that will come later.
+
+
+Post scan/join planning
+-----------------------
+
+So far we have discussed only scan/join planning, that is, implementation
+of the FROM and WHERE clauses of a SQL query. But the planner must also
+determine how to deal with GROUP BY, aggregation, and other higher-level
+features of queries; and in many cases there are multiple ways to do these
+steps and thus opportunities for optimization choices. These steps, like
+scan/join planning, are handled by constructing Paths representing the
+different ways to do a step, then choosing the cheapest Path.
+
+Since all Paths require a RelOptInfo as "parent", we create RelOptInfos
+representing the outputs of these upper-level processing steps. These
+RelOptInfos are mostly dummy, but their pathlists hold all the Paths
+considered useful for each step. Currently, we may create these types of
+additional RelOptInfos during upper-level planning:
+
+UPPERREL_SETOP result of UNION/INTERSECT/EXCEPT, if any
+UPPERREL_PARTIAL_GROUP_AGG result of partial grouping/aggregation, if any
+UPPERREL_GROUP_AGG result of grouping/aggregation, if any
+UPPERREL_WINDOW result of window functions, if any
+UPPERREL_PARTIAL_DISTINCT result of partial "SELECT DISTINCT", if any
+UPPERREL_DISTINCT result of "SELECT DISTINCT", if any
+UPPERREL_ORDERED result of ORDER BY, if any
+UPPERREL_FINAL result of any remaining top-level actions
+
+UPPERREL_FINAL is used to represent any final processing steps, currently
+LockRows (SELECT FOR UPDATE), LIMIT/OFFSET, and ModifyTable. There is no
+flexibility about the order in which these steps are done, and thus no need
+to subdivide this stage more finely.
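+
+As a hypothetical illustration, a query such as
+
+	SELECT a, sum(b) FROM tab GROUP BY a ORDER BY a LIMIT 10;
+
+would create (at least) UPPERREL_GROUP_AGG for the aggregation,
+UPPERREL_ORDERED for the ORDER BY, and UPPERREL_FINAL for the LIMIT, in
+addition to the baserel for "tab" built during scan/join planning.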
+
+These "upper relations" are identified by the UPPERREL enum values shown
+above, plus a relids set, which allows there to be more than one upperrel
+of the same kind. We use NULL for the relids if there's no need for more
+than one upperrel of the same kind. Currently, in fact, the relids set
+is vestigial because it's always NULL, but that's expected to change in
+the future. For example, in planning set operations, we might need the
+relids to denote which subset of the leaf SELECTs has been combined in a
+particular group of Paths that are competing with each other.
+
+The result of subquery_planner() is always returned as a set of Paths
+stored in the UPPERREL_FINAL rel with NULL relids. The other types of
+upperrels are created only if needed for the particular query.
+
+
+Parallel Query and Partial Paths
+--------------------------------
+
+Parallel query involves dividing up the work that needs to be performed
+either by an entire query or some portion of the query in such a way that
+some of that work can be done by one or more worker processes, which are
+called parallel workers. Parallel workers are a subtype of dynamic
+background workers; see src/backend/access/transam/README.parallel for a
+fuller description. The academic literature on parallel query suggests
+that parallel execution strategies can be divided into essentially two
+categories: pipelined parallelism, where the execution of the query is
+divided into multiple stages and each stage is handled by a separate
+process; and partitioning parallelism, where the data is split between
+multiple processes and each process handles a subset of it. The
+literature, however, suggests that gains from pipeline parallelism are
+often very limited due to the difficulty of avoiding pipeline stalls.
+Consequently, we do not currently attempt to generate query plans that
+use this technique.
+
+Instead, we focus on partitioning parallelism, which does not require
+that the underlying table be partitioned. It only requires that (1) there is
+some method of dividing the data from at least one of the base tables
+involved in the relation across multiple processes, (2) each process can
+handle its own portion of the data, and (3) the results can then be
+collected. Requirements (2) and (3) are satisfied by the
+executor node Gather (or GatherMerge), which launches any number of worker
+processes and executes its single child plan in all of them, and perhaps
+in the leader also, if the children aren't generating enough data to keep
+the leader busy. Requirement (1) is handled by the table scan node: when
+invoked with parallel_aware = true, this node will, in effect, partition
+the table on a block by block basis, returning a subset of the tuples from
+the relation in each worker where that scan node is executed.
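+
+For instance, a parallel plan for a simple filtered scan might have roughly
+this shape (the details, such as the number of workers, depend on the
+configuration and on costing):
+
+	Gather
+	  ->  Parallel Seq Scan on tab
+	        Filter: (x > 100)
+
+Here the Parallel Seq Scan is the parallel_aware table scan node, and Gather
+collects the partial results from the workers (and possibly the leader).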
+
+Just as we do for non-parallel access methods, we build Paths to
+represent access strategies that can be used in a parallel plan. These
+are, in essence, the same strategies that are available in the
+non-parallel plan, but there is an important difference: a path that
+will run beneath a Gather node returns only a subset of the query
+results in each worker, not all of them. To form a path that can
+actually be executed, the (rather large) cost of the Gather node must be
+accounted for. For this reason among others, paths intended to run
+beneath a Gather node - which we call "partial" paths since they return
+only a subset of the results in each worker - must be kept separate from
+ordinary paths (see RelOptInfo's partial_pathlist and the function
+add_partial_path).
+
+One of the keys to making parallel query effective is to run as much of
+the query in parallel as possible. Therefore, we expect it to generally
+be desirable to postpone the Gather stage until as near to the top of the
+plan as possible. Expanding the range of cases in which more work can be
+pushed below the Gather (and costing them accurately) is likely to keep us
+busy for a long time to come.
+
+Partitionwise joins
+-------------------
+
+A join between two similarly partitioned tables can be broken down into joins
+between their matching partitions if there exists an equi-join condition
+between the partition keys of the joining tables. The equi-join between
+partition keys implies that all join partners for a given row in one
+partitioned table must be in the corresponding partition of the other
+partitioned table. Because of this, the join between partitioned tables can
+be broken down into joins between the matching partitions. The resultant join
+is partitioned in the same way as the joining relations, thus allowing an
+N-way join between similarly partitioned tables having an equi-join condition
+between their partition keys to be broken down into N-way joins between their
+matching partitions. This technique of breaking down a join between
+partitioned tables into joins between their partitions is called partitionwise
+join. We will use the term "partitioned relation" for either a partitioned
+table or a join between compatibly partitioned tables.
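+
+For example (with hypothetical table names), if t1 and t2 are partitioned on
+column "key" with identical partition bounds, then
+
+	SELECT * FROM t1 JOIN t2 ON t1.key = t2.key;
+
+can be computed as an Append over the joins of the matching partition pairs
+(t1's first partition with t2's first partition, and so on).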
+
+Even if the joining relations don't have exactly the same partition bounds,
+partitionwise join can still be applied by using an advanced
+partition-matching algorithm. For each of the joining relations, the
+algorithm checks whether every partition of that relation matches at most one
+partition of the other joining relation. In such a case the join between the
+joining relations can be broken down into joins between the matching
+partitions, and the join relation can then be considered partitioned. The
+algorithm produces the pairs of matching partitions, plus the partition
+bounds for the join relation, so that partitionwise join can be used to
+compute the join. The algorithm is implemented in partition_bounds_merge().
+For an N-way join relation considered partitioned this way, not every pair of
+joining relations can use partitionwise join. For example:
+
+ (A leftjoin B on (Pab)) innerjoin C on (Pac)
+
+where A, B, and C are partitioned tables, and A has an extra partition
+compared to B and C. When considering partitionwise join for the join {A B},
+the extra partition of A doesn't have a matching partition on the nullable
+side, which is the case that the current implementation of partitionwise join
+can't handle. So {A B} is not considered partitioned, and the pair of {A B}
+and C considered for the 3-way join can't use partitionwise join. On the
+other hand, the pair of {A C} and B can use partitionwise join because {A C}
+is considered partitioned by eliminating the extra partition (see identity 1
+on outer join reordering). Whether an N-way join can use partitionwise join
+is determined based on the first pair of joining relations that are both
+partitioned and can use partitionwise join.
+
+The partitioning properties of a partitioned relation are stored in its
+RelOptInfo. The information about the data types of the partition keys is
+stored in a PartitionSchemeData structure. The planner maintains a list of
+canonical partition schemes (distinct PartitionSchemeData objects) so that
+the RelOptInfos of any two partitioned relations with the same partitioning
+scheme point to the same PartitionSchemeData object. This reduces the memory
+consumed by PartitionSchemeData objects and makes it easy to compare the
+partition schemes of joining relations.
+
+Partitionwise aggregates/grouping
+---------------------------------
+
+If the GROUP BY clause contains all of the partition keys, all the rows
+that belong to a given group must come from a single partition; therefore,
+aggregation can be done completely separately for each partition. Otherwise,
+partial aggregates can be computed for each partition, and then finalized
+after appending the results from the individual partitions. This technique of
+breaking down aggregation or grouping over a partitioned relation into
+aggregation or grouping over its partitions is called partitionwise
+aggregation. Especially when the partition keys match the GROUP BY clause,
+this can be significantly faster than the regular method.
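+
+For example (hypothetical tables again), if t1 is partitioned on "key", then
+
+	SELECT key, count(*) FROM t1 GROUP BY key;
+
+can be aggregated completely within each partition, whereas
+
+	SELECT other_col, count(*) FROM t1 GROUP BY other_col;
+
+can only be partially aggregated within each partition, with the partial
+results finalized after the partitions' outputs are appended.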
diff --git a/src/backend/optimizer/geqo/Makefile b/src/backend/optimizer/geqo/Makefile
new file mode 100644
index 0000000..1978c14
--- /dev/null
+++ b/src/backend/optimizer/geqo/Makefile
@@ -0,0 +1,33 @@
+#-------------------------------------------------------------------------
+#
+# Makefile--
+# Makefile for the genetic query optimizer module
+#
+# Copyright (c) 1994, Regents of the University of California
+#
+# src/backend/optimizer/geqo/Makefile
+#
+#-------------------------------------------------------------------------
+
+subdir = src/backend/optimizer/geqo
+top_builddir = ../../../..
+include $(top_builddir)/src/Makefile.global
+
+OBJS = \
+ geqo_copy.o \
+ geqo_cx.o \
+ geqo_erx.o \
+ geqo_eval.o \
+ geqo_main.o \
+ geqo_misc.o \
+ geqo_mutation.o \
+ geqo_ox1.o \
+ geqo_ox2.o \
+ geqo_pmx.o \
+ geqo_pool.o \
+ geqo_px.o \
+ geqo_random.o \
+ geqo_recombination.o \
+ geqo_selection.o
+
+include $(top_srcdir)/src/backend/common.mk
diff --git a/src/backend/optimizer/geqo/geqo_copy.c b/src/backend/optimizer/geqo/geqo_copy.c
new file mode 100644
index 0000000..62302f4
--- /dev/null
+++ b/src/backend/optimizer/geqo/geqo_copy.c
@@ -0,0 +1,54 @@
+/*------------------------------------------------------------------------
+ *
+ * geqo_copy.c
+ *
+ * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * src/backend/optimizer/geqo/geqo_copy.c
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/* contributed by:
+ =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=
+ * Martin Utesch * Institute of Automatic Control *
+ = = University of Mining and Technology =
+ * utesch@aut.tu-freiberg.de * Freiberg, Germany *
+ =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=
+ */
+
+/* this is adopted from D. Whitley's Genitor algorithm */
+
+/*************************************************************/
+/* */
+/* Copyright (c) 1990 */
+/* Darrell L. Whitley */
+/* Computer Science Department */
+/* Colorado State University */
+/* */
+/* Permission is hereby granted to copy all or any part of */
+/* this program for free distribution. The author's name */
+/* and this copyright notice must be included in any copy. */
+/* */
+/*************************************************************/
+
+#include "postgres.h"
+#include "optimizer/geqo_copy.h"
+
+/* geqo_copy
+ *
+ * copies one gene to another
+ *
+ */
+void
+geqo_copy(PlannerInfo *root, Chromosome *chromo1, Chromosome *chromo2,
+ int string_length)
+{
+ int i;
+
+ for (i = 0; i < string_length; i++)
+ chromo1->string[i] = chromo2->string[i];
+
+ chromo1->worth = chromo2->worth;
+}
diff --git a/src/backend/optimizer/geqo/geqo_cx.c b/src/backend/optimizer/geqo/geqo_cx.c
new file mode 100644
index 0000000..34cc53a
--- /dev/null
+++ b/src/backend/optimizer/geqo/geqo_cx.c
@@ -0,0 +1,123 @@
+/*------------------------------------------------------------------------
+*
+* geqo_cx.c
+*
+* cycle crossover [CX] routines;
+* CX operator according to Oliver et al
+* (Proc 2nd Int'l Conf on GA's)
+*
+* src/backend/optimizer/geqo/geqo_cx.c
+*
+*-------------------------------------------------------------------------
+*/
+
+/* contributed by:
+ =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=
+ * Martin Utesch * Institute of Automatic Control *
+ = = University of Mining and Technology =
+ * utesch@aut.tu-freiberg.de * Freiberg, Germany *
+ =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=
+ */
+
+/* the cx algorithm is adopted from Genitor : */
+/*************************************************************/
+/* */
+/* Copyright (c) 1990 */
+/* Darrell L. Whitley */
+/* Computer Science Department */
+/* Colorado State University */
+/* */
+/* Permission is hereby granted to copy all or any part of */
+/* this program for free distribution. The author's name */
+/* and this copyright notice must be included in any copy. */
+/* */
+/*************************************************************/
+
+
+#include "postgres.h"
+#include "optimizer/geqo_random.h"
+#include "optimizer/geqo_recombination.h"
+
+#if defined(CX)
+
+/* cx
+ *
+ * cycle crossover
+ */
+int
+cx(PlannerInfo *root, Gene *tour1, Gene *tour2, Gene *offspring,
+ int num_gene, City * city_table)
+{
+ int i,
+ start_pos,
+ curr_pos;
+ int count = 0;
+ int num_diffs = 0;
+
+ /* initialize city table */
+ for (i = 1; i <= num_gene; i++)
+ {
+ city_table[i].used = 0;
+ city_table[tour2[i - 1]].tour2_position = i - 1;
+ city_table[tour1[i - 1]].tour1_position = i - 1;
+ }
+
+ /* choose random cycle starting position */
+ start_pos = geqo_randint(root, num_gene - 1, 0);
+
+ /* child inherits first city */
+ offspring[start_pos] = tour1[start_pos];
+
+ /* begin cycle with tour1 */
+ curr_pos = start_pos;
+ city_table[(int) tour1[start_pos]].used = 1;
+
+ count++;
+
+ /* cx main part */
+
+
+/* STEP 1 */
+
+ while (tour2[curr_pos] != tour1[start_pos])
+ {
+ city_table[(int) tour2[curr_pos]].used = 1;
+ curr_pos = city_table[(int) tour2[curr_pos]].tour1_position;
+ offspring[curr_pos] = tour1[curr_pos];
+ count++;
+ }
+
+
+/* STEP 2 */
+
+ /* failed to create a complete tour */
+ if (count < num_gene)
+ {
+ for (i = 1; i <= num_gene; i++)
+ {
+ if (!city_table[i].used)
+ {
+ offspring[city_table[i].tour2_position] =
+ tour2[(int) city_table[i].tour2_position];
+ count++;
+ }
+ }
+ }
+
+
+/* STEP 3 */
+
+ /* still failed to create a complete tour */
+ if (count < num_gene)
+ {
+
+ /* count the number of differences between mom and offspring */
+ for (i = 0; i < num_gene; i++)
+ if (tour1[i] != offspring[i])
+ num_diffs++;
+ }
+
+ return num_diffs;
+}
+
+#endif /* defined(CX) */
diff --git a/src/backend/optimizer/geqo/geqo_erx.c b/src/backend/optimizer/geqo/geqo_erx.c
new file mode 100644
index 0000000..cc06613
--- /dev/null
+++ b/src/backend/optimizer/geqo/geqo_erx.c
@@ -0,0 +1,468 @@
+/*------------------------------------------------------------------------
+*
+* geqo_erx.c
+* edge recombination crossover [ER]
+*
+* src/backend/optimizer/geqo/geqo_erx.c
+*
+*-------------------------------------------------------------------------
+*/
+
+/* contributed by:
+ =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=
+ * Martin Utesch * Institute of Automatic Control *
+ = = University of Mining and Technology =
+ * utesch@aut.tu-freiberg.de * Freiberg, Germany *
+ =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=
+ */
+
+/* the edge recombination algorithm is adopted from Genitor : */
+/*************************************************************/
+/* */
+/* Copyright (c) 1990 */
+/* Darrell L. Whitley */
+/* Computer Science Department */
+/* Colorado State University */
+/* */
+/* Permission is hereby granted to copy all or any part of */
+/* this program for free distribution. The author's name */
+/* and this copyright notice must be included in any copy. */
+/* */
+/*************************************************************/
+
+
+#include "postgres.h"
+#include "optimizer/geqo_random.h"
+#include "optimizer/geqo_recombination.h"
+
+#if defined(ERX)
+
+static int gimme_edge(PlannerInfo *root, Gene gene1, Gene gene2, Edge *edge_table);
+static void remove_gene(PlannerInfo *root, Gene gene, Edge edge, Edge *edge_table);
+static Gene gimme_gene(PlannerInfo *root, Edge edge, Edge *edge_table);
+
+static Gene edge_failure(PlannerInfo *root, Gene *gene, int index, Edge *edge_table, int num_gene);
+
+
+/* alloc_edge_table
+ *
+ * allocate memory for edge table
+ *
+ */
+
+Edge *
+alloc_edge_table(PlannerInfo *root, int num_gene)
+{
+ Edge *edge_table;
+
+ /*
+ * palloc one extra location so that nodes numbered 1..n can be indexed
+ * directly; 0 will not be used
+ */
+
+ edge_table = (Edge *) palloc((num_gene + 1) * sizeof(Edge));
+
+ return edge_table;
+}
+
+/* free_edge_table
+ *
+ * deallocate memory of edge table
+ *
+ */
+void
+free_edge_table(PlannerInfo *root, Edge *edge_table)
+{
+ pfree(edge_table);
+}
+
+/* gimme_edge_table
+ *
+ * fills a data structure which represents the set of explicit
+ * edges between points in the (2) input genes
+ *
+ * assumes circular tours and bidirectional edges
+ *
+ * gimme_edge() will set "shared" edges to negative values
+ *
+ * returns average number edges/city in range 2.0 - 4.0
+ * where 2.0=homogeneous; 4.0=diverse
+ *
+ */
+float
+gimme_edge_table(PlannerInfo *root, Gene *tour1, Gene *tour2,
+ int num_gene, Edge *edge_table)
+{
+ int i,
+ index1,
+ index2;
+ int edge_total; /* total number of unique edges in two genes */
+
+ /* at first clear the edge table's old data */
+ for (i = 1; i <= num_gene; i++)
+ {
+ edge_table[i].total_edges = 0;
+ edge_table[i].unused_edges = 0;
+ }
+
+ /* fill edge table with new data */
+
+ edge_total = 0;
+
+ for (index1 = 0; index1 < num_gene; index1++)
+ {
+ /*
+ * presume the tour is circular, i.e. 1->2, 2->3, 3->1 this operation
+ * maps n back to 1
+ */
+
+ index2 = (index1 + 1) % num_gene;
+
+ /*
+ * edges are bidirectional, i.e. 1->2 is same as 2->1 call gimme_edge
+ * twice per edge
+ */
+
+ edge_total += gimme_edge(root, tour1[index1], tour1[index2], edge_table);
+ gimme_edge(root, tour1[index2], tour1[index1], edge_table);
+
+ edge_total += gimme_edge(root, tour2[index1], tour2[index2], edge_table);
+ gimme_edge(root, tour2[index2], tour2[index1], edge_table);
+ }
+
+ /* return average number of edges per index */
+ return ((float) (edge_total * 2) / (float) num_gene);
+}
+
+/* gimme_edge
+ *
+ * registers edge from city1 to city2 in input edge table
+ *
+ * no assumptions about directionality are made;
+ * therefore it is up to the calling routine to
+ * call gimme_edge twice to make a bi-directional edge
+ * between city1 and city2;
+ * uni-directional edges are possible as well (just call gimme_edge
+ * once with the direction from city1 to city2)
+ *
+ * returns 1 if edge was not already registered and was just added;
+ * 0 if edge was already registered and edge_table is unchanged
+ */
+static int
+gimme_edge(PlannerInfo *root, Gene gene1, Gene gene2, Edge *edge_table)
+{
+ int i;
+ int edges;
+ int city1 = (int) gene1;
+ int city2 = (int) gene2;
+
+
+ /* check whether edge city1->city2 already exists */
+ edges = edge_table[city1].total_edges;
+
+ for (i = 0; i < edges; i++)
+ {
+ if ((Gene) Abs(edge_table[city1].edge_list[i]) == city2)
+ {
+
+ /* mark shared edges as negative */
+ edge_table[city1].edge_list[i] = 0 - city2;
+
+ return 0;
+ }
+ }
+
+ /* add city1->city2; */
+ edge_table[city1].edge_list[edges] = city2;
+
+ /* increment the number of edges from city1 */
+ edge_table[city1].total_edges++;
+ edge_table[city1].unused_edges++;
+
+ return 1;
+}
+
+/* gimme_tour
+ *
+ * creates a new tour using edges from the edge table.
+ * priority is given to "shared" edges (i.e. edges which
+ * all parent genes possess and are marked as negative
+ * in the edge table.)
+ *
+ */
+int
+gimme_tour(PlannerInfo *root, Edge *edge_table, Gene *new_gene, int num_gene)
+{
+ int i;
+ int edge_failures = 0;
+
+ /* choose int between 1 and num_gene */
+ new_gene[0] = (Gene) geqo_randint(root, num_gene, 1);
+
+ for (i = 1; i < num_gene; i++)
+ {
+ /*
+ * as each point is entered into the tour, remove it from the edge
+ * table
+ */
+
+ remove_gene(root, new_gene[i - 1], edge_table[(int) new_gene[i - 1]], edge_table);
+
+ /* find destination for the newly entered point */
+
+ if (edge_table[new_gene[i - 1]].unused_edges > 0)
+ new_gene[i] = gimme_gene(root, edge_table[(int) new_gene[i - 1]], edge_table);
+
+ else
+ { /* cope with fault */
+ edge_failures++;
+
+ new_gene[i] = edge_failure(root, new_gene, i - 1, edge_table, num_gene);
+ }
+
+ /* mark this node as incorporated */
+ edge_table[(int) new_gene[i - 1]].unused_edges = -1;
+ } /* for (i=1; i<num_gene; i++) */
+
+ return edge_failures;
+}
+
+/* remove_gene
+ *
+ * removes input gene from edge_table.
+ * input edge is used
+ * to identify deletion locations within edge table.
+ *
+ */
+static void
+remove_gene(PlannerInfo *root, Gene gene, Edge edge, Edge *edge_table)
+{
+ int i,
+ j;
+ int possess_edge;
+ int genes_remaining;
+
+ /*
+ * do for every gene known to have an edge to input gene (i.e. in
+ * edge_list for input edge)
+ */
+
+ for (i = 0; i < edge.unused_edges; i++)
+ {
+ possess_edge = (int) Abs(edge.edge_list[i]);
+ genes_remaining = edge_table[possess_edge].unused_edges;
+
+ /* find the input gene in all edge_lists and delete it */
+ for (j = 0; j < genes_remaining; j++)
+ {
+
+ if ((Gene) Abs(edge_table[possess_edge].edge_list[j]) == gene)
+ {
+
+ edge_table[possess_edge].unused_edges--;
+
+ edge_table[possess_edge].edge_list[j] =
+ edge_table[possess_edge].edge_list[genes_remaining - 1];
+
+ break;
+ }
+ }
+ }
+}
+
+/* gimme_gene
+ *
+ * priority is given to "shared" edges
+ * (i.e. edges which both genes possess)
+ *
+ */
+static Gene
+gimme_gene(PlannerInfo *root, Edge edge, Edge *edge_table)
+{
+ int i;
+ Gene friend;
+ int minimum_edges;
+ int minimum_count = -1;
+ int rand_decision;
+
+ /*
+ * no point has edges to more than 4 other points thus, this contrived
+ * minimum will be replaced
+ */
+
+ minimum_edges = 5;
+
+ /* consider candidate destination points in edge list */
+
+ for (i = 0; i < edge.unused_edges; i++)
+ {
+ friend = (Gene) edge.edge_list[i];
+
+ /*
+ * give priority to shared edges that are negative; so return 'em
+ */
+
+ /*
+ * negative values are caught here so we need not worry about
+ * converting to absolute values
+ */
+ if (friend < 0)
+ return (Gene) Abs(friend);
+
+
+ /*
+ * give priority to candidates with fewest remaining unused edges;
+ * find out what the minimum number of unused edges is
+ * (minimum_edges); if there is more than one candidate with the
+ * minimum number of unused edges keep count of this number
+ * (minimum_count);
+ */
+
+ /*
+ * The test for minimum_count can probably be removed at some point
+ * but comments should probably indicate exactly why it is guaranteed
+ * that the test will always succeed the first time around. If it can
+ * fail then the code is in error
+ */
+
+
+ if (edge_table[(int) friend].unused_edges < minimum_edges)
+ {
+ minimum_edges = edge_table[(int) friend].unused_edges;
+ minimum_count = 1;
+ }
+ else if (minimum_count == -1)
+ elog(ERROR, "minimum_count not set");
+ else if (edge_table[(int) friend].unused_edges == minimum_edges)
+ minimum_count++;
+ } /* for (i=0; i<edge.unused_edges; i++) */
+
+
+ /* random decision of the possible candidates to use */
+ rand_decision = geqo_randint(root, minimum_count - 1, 0);
+
+
+ for (i = 0; i < edge.unused_edges; i++)
+ {
+ friend = (Gene) edge.edge_list[i];
+
+ /* return the chosen candidate point */
+ if (edge_table[(int) friend].unused_edges == minimum_edges)
+ {
+ minimum_count--;
+
+ if (minimum_count == rand_decision)
+ return friend;
+ }
+ }
+
+ /* ... should never be reached */
+ elog(ERROR, "neither shared nor minimum number nor random edge found");
+ return 0; /* to keep the compiler quiet */
+}
+
+/* edge_failure
+ *
+ * routine for handling edge failure
+ *
+ */
+static Gene
+edge_failure(PlannerInfo *root, Gene *gene, int index, Edge *edge_table, int num_gene)
+{
+ int i;
+ Gene fail_gene = gene[index];
+ int remaining_edges = 0;
+ int four_count = 0;
+ int rand_decision;
+
+
+ /*
+ * how many edges remain? how many gene with four total (initial) edges
+ * remain?
+ */
+
+ for (i = 1; i <= num_gene; i++)
+ {
+ if ((edge_table[i].unused_edges != -1) && (i != (int) fail_gene))
+ {
+ remaining_edges++;
+
+ if (edge_table[i].total_edges == 4)
+ four_count++;
+ }
+ }
+
+ /*
+ * random decision of the gene with remaining edges and whose total_edges
+ * == 4
+ */
+
+ if (four_count != 0)
+ {
+
+ rand_decision = geqo_randint(root, four_count - 1, 0);
+
+ for (i = 1; i <= num_gene; i++)
+ {
+
+ if ((Gene) i != fail_gene &&
+ edge_table[i].unused_edges != -1 &&
+ edge_table[i].total_edges == 4)
+ {
+
+ four_count--;
+
+ if (rand_decision == four_count)
+ return (Gene) i;
+ }
+ }
+
+ elog(LOG, "no edge found via random decision and total_edges == 4");
+ }
+ else if (remaining_edges != 0)
+ {
+ /* random decision of the gene with remaining edges */
+ rand_decision = geqo_randint(root, remaining_edges - 1, 0);
+
+ for (i = 1; i <= num_gene; i++)
+ {
+
+ if ((Gene) i != fail_gene &&
+ edge_table[i].unused_edges != -1)
+ {
+
+ remaining_edges--;
+
+ if (rand_decision == remaining_edges)
+ return i;
+ }
+ }
+
+ elog(LOG, "no edge found via random decision with remaining edges");
+ }
+
+ /*
+ * edge table seems to be empty; this happens sometimes on the last point
+ * due to the fact that the first point is removed from the table even
+ * though only one of its edges has been determined
+ */
+
+ else
+ { /* occurs only at the last point in the tour;
+ * simply look for the point which is not yet
+ * used */
+
+ for (i = 1; i <= num_gene; i++)
+ if (edge_table[i].unused_edges >= 0)
+ return (Gene) i;
+
+ elog(LOG, "no edge found via looking for the last unused point");
+ }
+
+
+ /* ... should never be reached */
+ elog(ERROR, "no edge found");
+ return 0; /* to keep the compiler quiet */
+}
+
+#endif /* defined(ERX) */
diff --git a/src/backend/optimizer/geqo/geqo_eval.c b/src/backend/optimizer/geqo/geqo_eval.c
new file mode 100644
index 0000000..004481d
--- /dev/null
+++ b/src/backend/optimizer/geqo/geqo_eval.c
@@ -0,0 +1,338 @@
+/*------------------------------------------------------------------------
+ *
+ * geqo_eval.c
+ * Routines to evaluate query trees
+ *
+ * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * src/backend/optimizer/geqo/geqo_eval.c
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/* contributed by:
+ =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=
+ * Martin Utesch * Institute of Automatic Control *
+ = = University of Mining and Technology =
+ * utesch@aut.tu-freiberg.de * Freiberg, Germany *
+ =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=
+ */
+
+#include "postgres.h"
+
+#include <float.h>
+#include <limits.h>
+#include <math.h>
+
+#include "optimizer/geqo.h"
+#include "optimizer/joininfo.h"
+#include "optimizer/pathnode.h"
+#include "optimizer/paths.h"
+#include "utils/memutils.h"
+
+
+/* A "clump" of already-joined relations within gimme_tree */
+typedef struct
+{
+ RelOptInfo *joinrel; /* joinrel for the set of relations */
+ int size; /* number of input relations in clump */
+} Clump;
+
+static List *merge_clump(PlannerInfo *root, List *clumps, Clump *new_clump,
+ int num_gene, bool force);
+static bool desirable_join(PlannerInfo *root,
+ RelOptInfo *outer_rel, RelOptInfo *inner_rel);
+
+
+/*
+ * geqo_eval
+ *
+ * Returns cost of a query tree as an individual of the population.
+ *
+ * If no legal join order can be extracted from the proposed tour,
+ * returns DBL_MAX.
+ */
+Cost
+geqo_eval(PlannerInfo *root, Gene *tour, int num_gene)
+{
+ MemoryContext mycontext;
+ MemoryContext oldcxt;
+ RelOptInfo *joinrel;
+ Cost fitness;
+ int savelength;
+ struct HTAB *savehash;
+
+ /*
+ * Create a private memory context that will hold all temp storage
+ * allocated inside gimme_tree().
+ *
+ * Since geqo_eval() will be called many times, we can't afford to let all
+ * that memory go unreclaimed until end of statement. Note we make the
+ * temp context a child of the planner's normal context, so that it will
+ * be freed even if we abort via ereport(ERROR).
+ */
+ mycontext = AllocSetContextCreate(CurrentMemoryContext,
+ "GEQO",
+ ALLOCSET_DEFAULT_SIZES);
+ oldcxt = MemoryContextSwitchTo(mycontext);
+
+ /*
+ * gimme_tree will add entries to root->join_rel_list, which may or may
+ * not already contain some entries. The newly added entries will be
+ * recycled by the MemoryContextDelete below, so we must ensure that the
+ * list is restored to its former state before exiting. We can do this by
+ * truncating the list to its original length. NOTE this assumes that any
+ * added entries are appended at the end!
+ *
+ * We also must take care not to mess up the outer join_rel_hash, if there
+ * is one. We can do this by just temporarily setting the link to NULL.
+ * (If we are dealing with enough join rels, which we very likely are, a
+ * new hash table will get built and used locally.)
+ *
+ * join_rel_level[] shouldn't be in use, so just Assert it isn't.
+ */
+ savelength = list_length(root->join_rel_list);
+ savehash = root->join_rel_hash;
+ Assert(root->join_rel_level == NULL);
+
+ root->join_rel_hash = NULL;
+
+ /* construct the best path for the given combination of relations */
+ joinrel = gimme_tree(root, tour, num_gene);
+
+ /*
+ * compute fitness, if we found a valid join
+ *
+ * XXX geqo does not currently support optimization for partial result
+ * retrieval, nor do we take any cognizance of possible use of
+ * parameterized paths --- how to fix?
+ */
+ if (joinrel)
+ {
+ Path *best_path = joinrel->cheapest_total_path;
+
+ fitness = best_path->total_cost;
+ }
+ else
+ fitness = DBL_MAX;
+
+ /*
+ * Restore join_rel_list to its former state, and put back original
+ * hashtable if any.
+ */
+ root->join_rel_list = list_truncate(root->join_rel_list,
+ savelength);
+ root->join_rel_hash = savehash;
+
+ /* release all the memory acquired within gimme_tree */
+ MemoryContextSwitchTo(oldcxt);
+ MemoryContextDelete(mycontext);
+
+ return fitness;
+}
+
+/*
+ * gimme_tree
+ * Form planner estimates for a join tree constructed in the specified
+ * order.
+ *
+ * 'tour' is the proposed join order, of length 'num_gene'
+ *
+ * Returns a new join relation whose cheapest path is the best plan for
+ * this join order. NB: will return NULL if join order is invalid and
+ * we can't modify it into a valid order.
+ *
+ * The original implementation of this routine always joined in the specified
+ * order, and so could only build left-sided plans (and right-sided and
+ * mixtures, as a byproduct of the fact that make_join_rel() is symmetric).
+ * It could never produce a "bushy" plan. This had a couple of big problems,
+ * of which the worst was that there are situations involving join order
+ * restrictions where the only valid plans are bushy.
+ *
+ * The present implementation takes the given tour as a guideline, but
+ * postpones joins that are illegal or seem unsuitable according to some
+ * heuristic rules. This allows correct bushy plans to be generated at need,
+ * and as a nice side-effect it seems to materially improve the quality of the
+ * generated plans. Note however that since it's just a heuristic, it can
+ * still fail in some cases. (In particular, we might clump together
+ * relations that actually mustn't be joined yet due to LATERAL restrictions;
+ * since there's no provision for un-clumping, this must lead to failure.)
+ */
+RelOptInfo *
+gimme_tree(PlannerInfo *root, Gene *tour, int num_gene)
+{
+ GeqoPrivateData *private = (GeqoPrivateData *) root->join_search_private;
+ List *clumps;
+ int rel_count;
+
+ /*
+ * Sometimes, a relation can't yet be joined to others due to heuristics
+ * or actual semantic restrictions. We maintain a list of "clumps" of
+ * successfully joined relations, with larger clumps at the front. Each
+ * new relation from the tour is added to the first clump it can be joined
+ * to; if there is none then it becomes a new clump of its own. When we
+ * enlarge an existing clump we check to see if it can now be merged with
+ * any other clumps. After the tour is all scanned, we forget about the
+ * heuristics and try to forcibly join any remaining clumps. If we are
+ * unable to merge all the clumps into one, fail.
+ */
+ clumps = NIL;
+
+ for (rel_count = 0; rel_count < num_gene; rel_count++)
+ {
+ int cur_rel_index;
+ RelOptInfo *cur_rel;
+ Clump *cur_clump;
+
+ /* Get the next input relation */
+ cur_rel_index = (int) tour[rel_count];
+ cur_rel = (RelOptInfo *) list_nth(private->initial_rels,
+ cur_rel_index - 1);
+
+ /* Make it into a single-rel clump */
+ cur_clump = (Clump *) palloc(sizeof(Clump));
+ cur_clump->joinrel = cur_rel;
+ cur_clump->size = 1;
+
+ /* Merge it into the clumps list, using only desirable joins */
+ clumps = merge_clump(root, clumps, cur_clump, num_gene, false);
+ }
+
+ if (list_length(clumps) > 1)
+ {
+ /* Force-join the remaining clumps in some legal order */
+ List *fclumps;
+ ListCell *lc;
+
+ fclumps = NIL;
+ foreach(lc, clumps)
+ {
+ Clump *clump = (Clump *) lfirst(lc);
+
+ fclumps = merge_clump(root, fclumps, clump, num_gene, true);
+ }
+ clumps = fclumps;
+ }
+
+ /* Did we succeed in forming a single join relation? */
+ if (list_length(clumps) != 1)
+ return NULL;
+
+ return ((Clump *) linitial(clumps))->joinrel;
+}
+
+/*
+ * Merge a "clump" into the list of existing clumps for gimme_tree.
+ *
+ * We try to merge the clump into some existing clump, and repeat if
+ * successful. When no more merging is possible, insert the clump
+ * into the list, preserving the list ordering rule (namely, that
+ * clumps of larger size appear earlier).
+ *
+ * If force is true, merge anywhere a join is legal, even if it causes
+ * a cartesian join to be performed. When force is false, do only
+ * "desirable" joins.
+ */
+static List *
+merge_clump(PlannerInfo *root, List *clumps, Clump *new_clump, int num_gene,
+ bool force)
+{
+ ListCell *lc;
+ int pos;
+
+ /* Look for a clump that new_clump can join to */
+ foreach(lc, clumps)
+ {
+ Clump *old_clump = (Clump *) lfirst(lc);
+
+ if (force ||
+ desirable_join(root, old_clump->joinrel, new_clump->joinrel))
+ {
+ RelOptInfo *joinrel;
+
+ /*
+ * Construct a RelOptInfo representing the join of these two input
+ * relations. Note that we expect the joinrel not to exist in
+ * root->join_rel_list yet, and so the paths constructed for it
+ * will only include the ones we want.
+ */
+ joinrel = make_join_rel(root,
+ old_clump->joinrel,
+ new_clump->joinrel);
+
+ /* Keep searching if join order is not valid */
+ if (joinrel)
+ {
+ /* Create paths for partitionwise joins. */
+ generate_partitionwise_join_paths(root, joinrel);
+
+ /*
+ * Except for the topmost scan/join rel, consider gathering
+ * partial paths. We'll do the same for the topmost scan/join
+ * rel once we know the final targetlist (see
+ * grouping_planner).
+ */
+ if (!bms_equal(joinrel->relids, root->all_baserels))
+ generate_useful_gather_paths(root, joinrel, false);
+
+ /* Find and save the cheapest paths for this joinrel */
+ set_cheapest(joinrel);
+
+ /* Absorb new clump into old */
+ old_clump->joinrel = joinrel;
+ old_clump->size += new_clump->size;
+ pfree(new_clump);
+
+ /* Remove old_clump from list */
+ clumps = foreach_delete_current(clumps, lc);
+
+ /*
+ * Recursively try to merge the enlarged old_clump with
+ * others. When no further merge is possible, we'll reinsert
+ * it into the list.
+ */
+ return merge_clump(root, clumps, old_clump, num_gene, force);
+ }
+ }
+ }
+
+ /*
+ * No merging is possible, so add new_clump as an independent clump, in
+ * proper order according to size. We can be fast for the common case
+ * where it has size 1 --- it should always go at the end.
+ */
+ if (clumps == NIL || new_clump->size == 1)
+ return lappend(clumps, new_clump);
+
+ /* Else search for the place to insert it */
+ for (pos = 0; pos < list_length(clumps); pos++)
+ {
+ Clump *old_clump = (Clump *) list_nth(clumps, pos);
+
+ if (new_clump->size > old_clump->size)
+ break; /* new_clump belongs before old_clump */
+ }
+ clumps = list_insert_nth(clumps, pos, new_clump);
+
+ return clumps;
+}
+
+/*
+ * Heuristics for gimme_tree: do we want to join these two relations?
+ */
+static bool
+desirable_join(PlannerInfo *root,
+ RelOptInfo *outer_rel, RelOptInfo *inner_rel)
+{
+ /*
+ * Join if there is an applicable join clause, or if there is a join order
+ * restriction forcing these rels to be joined.
+ */
+ if (have_relevant_joinclause(root, outer_rel, inner_rel) ||
+ have_join_order_restriction(root, outer_rel, inner_rel))
+ return true;
+
+ /* Otherwise postpone the join till later. */
+ return false;
+}
diff --git a/src/backend/optimizer/geqo/geqo_main.c b/src/backend/optimizer/geqo/geqo_main.c
new file mode 100644
index 0000000..68ed743
--- /dev/null
+++ b/src/backend/optimizer/geqo/geqo_main.c
@@ -0,0 +1,353 @@
+/*------------------------------------------------------------------------
+ *
+ * geqo_main.c
+ * solution to the query optimization problem
+ * by means of a Genetic Algorithm (GA)
+ *
+ * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * src/backend/optimizer/geqo/geqo_main.c
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/* contributed by:
+ =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=
+ * Martin Utesch * Institute of Automatic Control *
+ = = University of Mining and Technology =
+ * utesch@aut.tu-freiberg.de * Freiberg, Germany *
+ =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=
+ */
+
+/* -- parts of this are adapted from D. Whitley's Genitor algorithm -- */
+
+#include "postgres.h"
+
+#include <math.h>
+
+#include "optimizer/geqo_misc.h"
+#include "optimizer/geqo_mutation.h"
+#include "optimizer/geqo_pool.h"
+#include "optimizer/geqo_random.h"
+#include "optimizer/geqo_selection.h"
+
+
+/*
+ * Configuration options
+ */
+int Geqo_effort;
+int Geqo_pool_size;
+int Geqo_generations;
+double Geqo_selection_bias;
+double Geqo_seed;
+
+
+static int gimme_pool_size(int nr_rel);
+static int gimme_number_generations(int pool_size);
+
+/* complain if no recombination mechanism is #define'd */
+#if !defined(ERX) && \
+ !defined(PMX) && \
+ !defined(CX) && \
+ !defined(PX) && \
+ !defined(OX1) && \
+ !defined(OX2)
+#error "must choose one GEQO recombination mechanism in geqo.h"
+#endif
+
+
+/*
+ * geqo
+ * solution of the query optimization problem
+ * similar to a constrained Traveling Salesman Problem (TSP)
+ */
+
+RelOptInfo *
+geqo(PlannerInfo *root, int number_of_rels, List *initial_rels)
+{
+ GeqoPrivateData private;
+ int generation;
+ Chromosome *momma;
+ Chromosome *daddy;
+ Chromosome *kid;
+ Pool *pool;
+ int pool_size,
+ number_generations;
+
+#ifdef GEQO_DEBUG
+ int status_interval;
+#endif
+ Gene *best_tour;
+ RelOptInfo *best_rel;
+
+#if defined(ERX)
+ Edge *edge_table; /* list of edges */
+ int edge_failures = 0;
+#endif
+#if defined(CX) || defined(PX) || defined(OX1) || defined(OX2)
+ City *city_table; /* list of cities */
+#endif
+#if defined(CX)
+ int cycle_diffs = 0;
+ int mutations = 0;
+#endif
+
+/* set up private information */
+ root->join_search_private = (void *) &private;
+ private.initial_rels = initial_rels;
+
+/* initialize private number generator */
+ geqo_set_seed(root, Geqo_seed);
+
+/* set GA parameters */
+ pool_size = gimme_pool_size(number_of_rels);
+ number_generations = gimme_number_generations(pool_size);
+#ifdef GEQO_DEBUG
+ status_interval = 10;
+#endif
+
+/* allocate genetic pool memory */
+ pool = alloc_pool(root, pool_size, number_of_rels);
+
+/* random initialization of the pool */
+ random_init_pool(root, pool);
+
+/* sort the pool according to cheapest path as fitness */
+ sort_pool(root, pool); /* we have to do it only one time, since all
+ * kids replace the worst individuals in
+ * future (-> geqo_pool.c:spread_chromo ) */
+
+#ifdef GEQO_DEBUG
+ elog(DEBUG1, "GEQO selected %d pool entries, best %.2f, worst %.2f",
+ pool_size,
+ pool->data[0].worth,
+ pool->data[pool_size - 1].worth);
+#endif
+
+/* allocate chromosome momma and daddy memory */
+ momma = alloc_chromo(root, pool->string_length);
+ daddy = alloc_chromo(root, pool->string_length);
+
+#if defined (ERX)
+#ifdef GEQO_DEBUG
+ elog(DEBUG2, "using edge recombination crossover [ERX]");
+#endif
+/* allocate edge table memory */
+ edge_table = alloc_edge_table(root, pool->string_length);
+#elif defined(PMX)
+#ifdef GEQO_DEBUG
+ elog(DEBUG2, "using partially matched crossover [PMX]");
+#endif
+/* allocate chromosome kid memory */
+ kid = alloc_chromo(root, pool->string_length);
+#elif defined(CX)
+#ifdef GEQO_DEBUG
+ elog(DEBUG2, "using cycle crossover [CX]");
+#endif
+/* allocate city table memory */
+ kid = alloc_chromo(root, pool->string_length);
+ city_table = alloc_city_table(root, pool->string_length);
+#elif defined(PX)
+#ifdef GEQO_DEBUG
+ elog(DEBUG2, "using position crossover [PX]");
+#endif
+/* allocate city table memory */
+ kid = alloc_chromo(root, pool->string_length);
+ city_table = alloc_city_table(root, pool->string_length);
+#elif defined(OX1)
+#ifdef GEQO_DEBUG
+ elog(DEBUG2, "using order crossover [OX1]");
+#endif
+/* allocate city table memory */
+ kid = alloc_chromo(root, pool->string_length);
+ city_table = alloc_city_table(root, pool->string_length);
+#elif defined(OX2)
+#ifdef GEQO_DEBUG
+ elog(DEBUG2, "using order crossover [OX2]");
+#endif
+/* allocate city table memory */
+ kid = alloc_chromo(root, pool->string_length);
+ city_table = alloc_city_table(root, pool->string_length);
+#endif
+
+
+/* my pain main part: */
+/* iterative optimization */
+
+ for (generation = 0; generation < number_generations; generation++)
+ {
+ /* SELECTION: using linear bias function */
+ geqo_selection(root, momma, daddy, pool, Geqo_selection_bias);
+
+#if defined (ERX)
+ /* EDGE RECOMBINATION CROSSOVER */
+ gimme_edge_table(root, momma->string, daddy->string, pool->string_length, edge_table);
+
+ kid = momma;
+
+ /* are there any edge failures ? */
+ edge_failures += gimme_tour(root, edge_table, kid->string, pool->string_length);
+#elif defined(PMX)
+ /* PARTIALLY MATCHED CROSSOVER */
+ pmx(root, momma->string, daddy->string, kid->string, pool->string_length);
+#elif defined(CX)
+ /* CYCLE CROSSOVER */
+ cycle_diffs = cx(root, momma->string, daddy->string, kid->string, pool->string_length, city_table);
+ /* mutate the child */
+ if (cycle_diffs == 0)
+ {
+ mutations++;
+ geqo_mutation(root, kid->string, pool->string_length);
+ }
+#elif defined(PX)
+ /* POSITION CROSSOVER */
+ px(root, momma->string, daddy->string, kid->string, pool->string_length, city_table);
+#elif defined(OX1)
+ /* ORDER CROSSOVER */
+ ox1(root, momma->string, daddy->string, kid->string, pool->string_length, city_table);
+#elif defined(OX2)
+ /* ORDER CROSSOVER */
+ ox2(root, momma->string, daddy->string, kid->string, pool->string_length, city_table);
+#endif
+
+
+ /* EVALUATE FITNESS */
+ kid->worth = geqo_eval(root, kid->string, pool->string_length);
+
+ /* push the kid into the wilderness of life according to its worth */
+ spread_chromo(root, kid, pool);
+
+
+#ifdef GEQO_DEBUG
+ if (status_interval && !(generation % status_interval))
+ print_gen(stdout, pool, generation);
+#endif
+
+ }
+
+
+#if defined(ERX)
+#if defined(GEQO_DEBUG)
+ if (edge_failures != 0)
+ elog(LOG, "[GEQO] failures: %d, average: %d",
+ edge_failures, (int) number_generations / edge_failures);
+ else
+ elog(LOG, "[GEQO] no edge failures detected");
+#else
+ /* suppress variable-set-but-not-used warnings from some compilers */
+ (void) edge_failures;
+#endif
+#endif
+
+#if defined(CX) && defined(GEQO_DEBUG)
+ if (mutations != 0)
+ elog(LOG, "[GEQO] mutations: %d, generations: %d",
+ mutations, number_generations);
+ else
+ elog(LOG, "[GEQO] no mutations processed");
+#endif
+
+#ifdef GEQO_DEBUG
+ print_pool(stdout, pool, 0, pool_size - 1);
+#endif
+
+#ifdef GEQO_DEBUG
+ elog(DEBUG1, "GEQO best is %.2f after %d generations",
+ pool->data[0].worth, number_generations);
+#endif
+
+
+ /*
+ * got the cheapest query tree processed by geqo; first element of the
+ * population indicates the best query tree
+ */
+ best_tour = (Gene *) pool->data[0].string;
+
+ best_rel = gimme_tree(root, best_tour, pool->string_length);
+
+ if (best_rel == NULL)
+ elog(ERROR, "geqo failed to make a valid plan");
+
+ /* DBG: show the query plan */
+#ifdef NOT_USED
+ print_plan(best_plan, root);
+#endif
+
+ /* ... free memory stuff */
+ free_chromo(root, momma);
+ free_chromo(root, daddy);
+
+#if defined (ERX)
+ free_edge_table(root, edge_table);
+#elif defined(PMX)
+ free_chromo(root, kid);
+#elif defined(CX)
+ free_chromo(root, kid);
+ free_city_table(root, city_table);
+#elif defined(PX)
+ free_chromo(root, kid);
+ free_city_table(root, city_table);
+#elif defined(OX1)
+ free_chromo(root, kid);
+ free_city_table(root, city_table);
+#elif defined(OX2)
+ free_chromo(root, kid);
+ free_city_table(root, city_table);
+#endif
+
+ free_pool(root, pool);
+
+ /* ... clear root pointer to our private storage */
+ root->join_search_private = NULL;
+
+ return best_rel;
+}
+
+
+/*
+ * Return either configured pool size or a good default
+ *
+ * The default is based on query size (no. of relations) = 2^(QS+1),
+ * but constrained to a range based on the effort value.
+ */
+static int
+gimme_pool_size(int nr_rel)
+{
+ double size;
+ int minsize;
+ int maxsize;
+
+ /* Legal pool size *must* be at least 2, so ignore attempt to select 1 */
+ if (Geqo_pool_size >= 2)
+ return Geqo_pool_size;
+
+ size = pow(2.0, nr_rel + 1.0);
+
+ maxsize = 50 * Geqo_effort; /* 50 to 500 individuals */
+ if (size > maxsize)
+ return maxsize;
+
+ minsize = 10 * Geqo_effort; /* 10 to 100 individuals */
+ if (size < minsize)
+ return minsize;
+
+ return (int) ceil(size);
+}
+
+
+/*
+ * Return either configured number of generations or a good default
+ *
+ * The default is the same as the pool size, which allows us to be
+ * sure that less-fit individuals get pushed out of the breeding
+ * population before the run finishes.
+ */
+static int
+gimme_number_generations(int pool_size)
+{
+ if (Geqo_generations > 0)
+ return Geqo_generations;
+
+ return pool_size;
+}
diff --git a/src/backend/optimizer/geqo/geqo_misc.c b/src/backend/optimizer/geqo/geqo_misc.c
new file mode 100644
index 0000000..890ac36
--- /dev/null
+++ b/src/backend/optimizer/geqo/geqo_misc.c
@@ -0,0 +1,132 @@
+/*------------------------------------------------------------------------
+ *
+ * geqo_misc.c
+ * misc. printout and debug stuff
+ *
+ * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * src/backend/optimizer/geqo/geqo_misc.c
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/* contributed by:
+ =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=
+ * Martin Utesch * Institute of Automatic Control *
+ = = University of Mining and Technology =
+ * utesch@aut.tu-freiberg.de * Freiberg, Germany *
+ =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=
+ */
+
+#include "postgres.h"
+
+#include "optimizer/geqo_misc.h"
+
+
+#ifdef GEQO_DEBUG
+
+
+/*
+ * avg_pool
+ */
+static double
+avg_pool(Pool *pool)
+{
+ int i;
+ double cumulative = 0.0;
+
+ if (pool->size <= 0)
+ elog(ERROR, "pool_size is zero");
+
+ /*
+ * Since the pool may contain multiple occurrences of DBL_MAX, divide by
+ * pool->size before summing, not after, to avoid overflow. This loses a
+ * little in speed and accuracy, but this routine is only used for debug
+ * printouts, so we don't care that much.
+ */
+ for (i = 0; i < pool->size; i++)
+ cumulative += pool->data[i].worth / pool->size;
+
+ return cumulative;
+}
+
+/* print_pool
+ */
+void
+print_pool(FILE *fp, Pool *pool, int start, int stop)
+{
+ int i,
+ j;
+
+ /* be extra careful that start and stop are valid inputs */
+
+ if (start < 0)
+ start = 0;
+ if (stop > pool->size)
+ stop = pool->size;
+
+ if (start + stop > pool->size)
+ {
+ start = 0;
+ stop = pool->size;
+ }
+
+ for (i = start; i < stop; i++)
+ {
+ fprintf(fp, "%d)\t", i);
+ for (j = 0; j < pool->string_length; j++)
+ fprintf(fp, "%d ", pool->data[i].string[j]);
+ fprintf(fp, "%g\n", pool->data[i].worth);
+ }
+
+ fflush(fp);
+}
+
+/* print_gen
+ *
+ * printout for chromosome: best, worst, mean, average
+ */
+void
+print_gen(FILE *fp, Pool *pool, int generation)
+{
+ int lowest;
+
+ /* Get index to lowest ranking gene in population. */
+ /* Use 2nd to last since last is buffer. */
+ lowest = pool->size > 1 ? pool->size - 2 : 0;
+
+ fprintf(fp,
+ "%5d | Best: %g Worst: %g Mean: %g Avg: %g\n",
+ generation,
+ pool->data[0].worth,
+ pool->data[lowest].worth,
+ pool->data[pool->size / 2].worth,
+ avg_pool(pool));
+
+ fflush(fp);
+}
+
+
+void
+print_edge_table(FILE *fp, Edge *edge_table, int num_gene)
+{
+ int i,
+ j;
+
+ fprintf(fp, "\nEDGE TABLE\n");
+
+ for (i = 1; i <= num_gene; i++)
+ {
+ fprintf(fp, "%d :", i);
+ for (j = 0; j < edge_table[i].unused_edges; j++)
+ fprintf(fp, " %d", edge_table[i].edge_list[j]);
+ fprintf(fp, "\n");
+ }
+
+ fprintf(fp, "\n");
+
+ fflush(fp);
+}
+
+#endif /* GEQO_DEBUG */
diff --git a/src/backend/optimizer/geqo/geqo_mutation.c b/src/backend/optimizer/geqo/geqo_mutation.c
new file mode 100644
index 0000000..2af0295
--- /dev/null
+++ b/src/backend/optimizer/geqo/geqo_mutation.c
@@ -0,0 +1,66 @@
+/*------------------------------------------------------------------------
+*
+* geqo_mutation.c
+*
+* TSP mutation routines
+*
+* src/backend/optimizer/geqo/geqo_mutation.c
+*
+*-------------------------------------------------------------------------
+*/
+
+/* contributed by:
+ =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=
+ * Martin Utesch * Institute of Automatic Control *
+ = = University of Mining and Technology =
+ * utesch@aut.tu-freiberg.de * Freiberg, Germany *
+ =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=
+ */
+
+/* this is adopted from Genitor : */
+/*************************************************************/
+/* */
+/* Copyright (c) 1990 */
+/* Darrell L. Whitley */
+/* Computer Science Department */
+/* Colorado State University */
+/* */
+/* Permission is hereby granted to copy all or any part of */
+/* this program for free distribution. The author's name */
+/* and this copyright notice must be included in any copy. */
+/* */
+/*************************************************************/
+
+#include "postgres.h"
+#include "optimizer/geqo_mutation.h"
+#include "optimizer/geqo_random.h"
+
+#if defined(CX) /* currently used only in CX mode */
+
+void
+geqo_mutation(PlannerInfo *root, Gene *tour, int num_gene)
+{
+ int swap1;
+ int swap2;
+ int num_swaps = geqo_randint(root, num_gene / 3, 0);
+ Gene temp;
+
+
+ while (num_swaps > 0)
+ {
+ swap1 = geqo_randint(root, num_gene - 1, 0);
+ swap2 = geqo_randint(root, num_gene - 1, 0);
+
+ while (swap1 == swap2)
+ swap2 = geqo_randint(root, num_gene - 1, 0);
+
+ temp = tour[swap1];
+ tour[swap1] = tour[swap2];
+ tour[swap2] = temp;
+
+
+ num_swaps -= 1;
+ }
+}
+
+#endif /* defined(CX) */
diff --git a/src/backend/optimizer/geqo/geqo_ox1.c b/src/backend/optimizer/geqo/geqo_ox1.c
new file mode 100644
index 0000000..ede7abb
--- /dev/null
+++ b/src/backend/optimizer/geqo/geqo_ox1.c
@@ -0,0 +1,94 @@
+/*------------------------------------------------------------------------
+*
+* geqo_ox1.c
+*
+* order crossover [OX] routines;
+* OX1 operator according to Davis
+* (Proc Int'l Joint Conf on AI)
+*
+* src/backend/optimizer/geqo/geqo_ox1.c
+*
+*-------------------------------------------------------------------------
+*/
+
+/* contributed by:
+ =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=
+ * Martin Utesch * Institute of Automatic Control *
+ = = University of Mining and Technology =
+ * utesch@aut.tu-freiberg.de * Freiberg, Germany *
+ =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=
+ */
+
+/* the ox algorithm is adopted from Genitor : */
+/*************************************************************/
+/* */
+/* Copyright (c) 1990 */
+/* Darrell L. Whitley */
+/* Computer Science Department */
+/* Colorado State University */
+/* */
+/* Permission is hereby granted to copy all or any part of */
+/* this program for free distribution. The author's name */
+/* and this copyright notice must be included in any copy. */
+/* */
+/*************************************************************/
+
+#include "postgres.h"
+#include "optimizer/geqo_random.h"
+#include "optimizer/geqo_recombination.h"
+
+#if defined(OX1)
+
+/* ox1
+ *
+ * position crossover
+ */
+void
+ox1(PlannerInfo *root, Gene *tour1, Gene *tour2, Gene *offspring, int num_gene,
+ City * city_table)
+{
+ int left,
+ right,
+ k,
+ p,
+ temp;
+
+ /* initialize city table */
+ for (k = 1; k <= num_gene; k++)
+ city_table[k].used = 0;
+
+ /* select portion to copy from tour1 */
+ left = geqo_randint(root, num_gene - 1, 0);
+ right = geqo_randint(root, num_gene - 1, 0);
+
+ if (left > right)
+ {
+ temp = left;
+ left = right;
+ right = temp;
+ }
+
+ /* copy portion from tour1 to offspring */
+ for (k = left; k <= right; k++)
+ {
+ offspring[k] = tour1[k];
+ city_table[(int) tour1[k]].used = 1;
+ }
+
+ k = (right + 1) % num_gene; /* index into offspring */
+ p = k; /* index into tour2 */
+
+ /* copy stuff from tour2 to offspring */
+ while (k != left)
+ {
+ if (!city_table[(int) tour2[p]].used)
+ {
+ offspring[k] = tour2[p];
+ k = (k + 1) % num_gene;
+ city_table[(int) tour2[p]].used = 1;
+ }
+ p = (p + 1) % num_gene; /* increment tour2-index */
+ }
+}
+
+#endif /* defined(OX1) */
diff --git a/src/backend/optimizer/geqo/geqo_ox2.c b/src/backend/optimizer/geqo/geqo_ox2.c
new file mode 100644
index 0000000..080dbc0
--- /dev/null
+++ b/src/backend/optimizer/geqo/geqo_ox2.c
@@ -0,0 +1,111 @@
+/*------------------------------------------------------------------------
+*
+* geqo_ox2.c
+*
+* order crossover [OX] routines;
+* OX2 operator according to Syswerda
+* (The Genetic Algorithms Handbook, ed L Davis)
+*
+* src/backend/optimizer/geqo/geqo_ox2.c
+*
+*-------------------------------------------------------------------------
+*/
+
+/* contributed by:
+ =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=
+ * Martin Utesch * Institute of Automatic Control *
+ = = University of Mining and Technology =
+ * utesch@aut.tu-freiberg.de * Freiberg, Germany *
+ =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=
+ */
+
+/* the ox algorithm is adopted from Genitor : */
+/*************************************************************/
+/* */
+/* Copyright (c) 1990 */
+/* Darrell L. Whitley */
+/* Computer Science Department */
+/* Colorado State University */
+/* */
+/* Permission is hereby granted to copy all or any part of */
+/* this program for free distribution. The author's name */
+/* and this copyright notice must be included in any copy. */
+/* */
+/*************************************************************/
+
+#include "postgres.h"
+#include "optimizer/geqo_random.h"
+#include "optimizer/geqo_recombination.h"
+
+#if defined(OX2)
+
+/* ox2
+ *
+ * order crossover
+ */
+void
+ox2(PlannerInfo *root, Gene *tour1, Gene *tour2, Gene *offspring, int num_gene, City * city_table)
+{
+ int k,
+ j,
+ count,
+ pos,
+ select,
+ num_positions;
+
+ /* initialize city table */
+ for (k = 1; k <= num_gene; k++)
+ {
+ city_table[k].used = 0;
+ city_table[k - 1].select_list = -1;
+ }
+
+ /* determine the number of positions to be inherited from tour1 */
+ num_positions = geqo_randint(root, 2 * num_gene / 3, num_gene / 3);
+
+ /* make a list of selected cities */
+ for (k = 0; k < num_positions; k++)
+ {
+ pos = geqo_randint(root, num_gene - 1, 0);
+ city_table[pos].select_list = (int) tour1[pos];
+ city_table[(int) tour1[pos]].used = 1; /* mark used */
+ }
+
+
+ count = 0;
+ k = 0;
+
+ /* consolidate the select list to adjacent positions */
+ while (count < num_positions)
+ {
+ if (city_table[k].select_list == -1)
+ {
+ j = k + 1;
+ while ((city_table[j].select_list == -1) && (j < num_gene))
+ j++;
+
+ city_table[k].select_list = city_table[j].select_list;
+ city_table[j].select_list = -1;
+ count++;
+ }
+ else
+ count++;
+ k++;
+ }
+
+ select = 0;
+
+ for (k = 0; k < num_gene; k++)
+ {
+ if (city_table[(int) tour2[k]].used)
+ {
+ offspring[k] = (Gene) city_table[select].select_list;
+ select++; /* next city in the select list */
+ }
+ else
+ /* city isn't used yet, so inherit from tour2 */
+ offspring[k] = tour2[k];
+ }
+}
+
+#endif /* defined(OX2) */
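A small worked example (the tours and positions here are hypothetical) may make the effect of OX2 clearer: with tour1 = 1 2 3 4 5 6 and tour2 = 4 1 2 5 3 6, suppose positions 1 and 3 of tour1 (counting from zero) are drawn, so the select list, in tour1 order, is (2, 4). Scanning tour2, cities 4 and 2 are the ones marked used; they are overwritten in place by successive select-list entries, giving the offspring 2 1 4 5 3 6. The selected cities thus keep the positions they occupy in tour2 but appear in tour1's relative order, while every other city is inherited from tour2 unchanged.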
diff --git a/src/backend/optimizer/geqo/geqo_pmx.c b/src/backend/optimizer/geqo/geqo_pmx.c
new file mode 100644
index 0000000..e44fd0b
--- /dev/null
+++ b/src/backend/optimizer/geqo/geqo_pmx.c
@@ -0,0 +1,218 @@
+/*------------------------------------------------------------------------
+*
+* geqo_pmx.c
+*
+* partially matched crossover [PMX] routines;
+* PMX operator according to Goldberg & Lingle
+* (Proc Int'l Conf on GA's)
+*
+* src/backend/optimizer/geqo/geqo_pmx.c
+*
+*-------------------------------------------------------------------------
+*/
+
+/* contributed by:
+ =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=
+ * Martin Utesch * Institute of Automatic Control *
+ = = University of Mining and Technology =
+ * utesch@aut.tu-freiberg.de * Freiberg, Germany *
+ =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=
+ */
+
+/* the pmx algorithm is adapted from Genitor : */
+/*************************************************************/
+/* */
+/* Copyright (c) 1990 */
+/* Darrell L. Whitley */
+/* Computer Science Department */
+/* Colorado State University */
+/* */
+/* Permission is hereby granted to copy all or any part of */
+/* this program for free distribution. The author's name */
+/* and this copyright notice must be included in any copy. */
+/* */
+/*************************************************************/
+
+#include "postgres.h"
+#include "optimizer/geqo_random.h"
+#include "optimizer/geqo_recombination.h"
+
+#if defined(PMX)
+
+/* pmx
+ *
+ * partially matched crossover
+ */
+void
+pmx(PlannerInfo *root, Gene *tour1, Gene *tour2, Gene *offspring, int num_gene)
+{
+ int *failed = (int *) palloc((num_gene + 1) * sizeof(int));
+ int *from = (int *) palloc((num_gene + 1) * sizeof(int));
+ int *indx = (int *) palloc((num_gene + 1) * sizeof(int));
+ int *check_list = (int *) palloc((num_gene + 1) * sizeof(int));
+
+ int left,
+ right,
+ temp,
+ i,
+ j,
+ k;
+ int mx_fail,
+ found,
+ mx_hold;
+
+
+/* no mutation so start up the pmx replacement algorithm */
+/* initialize failed[], from[], check_list[] */
+ for (k = 0; k < num_gene; k++)
+ {
+ failed[k] = -1;
+ from[k] = -1;
+ check_list[k + 1] = 0;
+ }
+
+/* locate crossover points */
+ left = geqo_randint(root, num_gene - 1, 0);
+ right = geqo_randint(root, num_gene - 1, 0);
+
+ if (left > right)
+ {
+ temp = left;
+ left = right;
+ right = temp;
+ }
+
+
+/* copy tour2 into offspring */
+ for (k = 0; k < num_gene; k++)
+ {
+ offspring[k] = tour2[k];
+ from[k] = DAD;
+ check_list[tour2[k]]++;
+ }
+
+/* copy tour1 into offspring */
+ for (k = left; k <= right; k++)
+ {
+ check_list[offspring[k]]--;
+ offspring[k] = tour1[k];
+ from[k] = MOM;
+ check_list[tour1[k]]++;
+ }
+
+
+/* pmx main part */
+
+ mx_fail = 0;
+
+/* STEP 1 */
+
+ for (k = left; k <= right; k++)
+ { /* for all elements in the tour1-2 */
+
+ if (tour1[k] == tour2[k])
+ found = 1; /* find match in tour2 */
+
+ else
+ {
+ found = 0; /* substitute elements */
+
+ j = 0;
+ while (!(found) && (j < num_gene))
+ {
+ if ((offspring[j] == tour1[k]) && (from[j] == DAD))
+ {
+
+ check_list[offspring[j]]--;
+ offspring[j] = tour2[k];
+ found = 1;
+ check_list[tour2[k]]++;
+ }
+
+ j++;
+ }
+ }
+
+ if (!(found))
+ { /* failed to replace gene */
+ failed[mx_fail] = (int) tour1[k];
+ indx[mx_fail] = k;
+ mx_fail++;
+ }
+ } /* ... for */
+
+
+/* STEP 2 */
+
+ /* see if any genes could not be replaced */
+ if (mx_fail > 0)
+ {
+ mx_hold = mx_fail;
+
+ for (k = 0; k < mx_hold; k++)
+ {
+ found = 0;
+
+ j = 0;
+ while (!(found) && (j < num_gene))
+ {
+
+ if ((failed[k] == (int) offspring[j]) && (from[j] == DAD))
+ {
+ check_list[offspring[j]]--;
+ offspring[j] = tour2[indx[k]];
+ check_list[tour2[indx[k]]]++;
+
+ found = 1;
+ failed[k] = -1;
+ mx_fail--;
+ }
+
+ j++;
+ }
+ } /* ... for */
+ } /* ... if */
+
+
+/* STEP 3 */
+
+ for (k = 1; k <= num_gene; k++)
+ {
+
+ if (check_list[k] > 1)
+ {
+ i = 0;
+
+ while (i < num_gene)
+ {
+ if ((offspring[i] == (Gene) k) && (from[i] == DAD))
+ {
+ j = 1;
+
+ while (j <= num_gene)
+ {
+ if (check_list[j] == 0)
+ {
+ offspring[i] = (Gene) j;
+ check_list[k]--;
+ check_list[j]++;
+ i = num_gene + 1;
+ j = i;
+ }
+
+ j++;
+ }
+ } /* ... if */
+
+ i++;
+ } /* end while */
+ }
+ } /* ... for */
+
+ pfree(failed);
+ pfree(from);
+ pfree(indx);
+ pfree(check_list);
+}
+
+#endif /* defined(PMX) */
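A short hypothetical trace of the repair steps above: take tour1 = 1 2 3 4 5, tour2 = 3 5 1 2 4, and crossover points left = 1, right = 2 (counting from zero). The offspring starts as a copy of tour2, 3 5 1 2 4; overwriting positions 1..2 with tour1's segment gives 3 2 3 2 4, which duplicates cities 2 and 3. STEP 1 repairs both: the DAD copy of city 2 (position 3) is replaced by the displaced city 5, and the DAD copy of city 3 (position 0) by the displaced city 1, yielding the valid tour 1 2 3 5 4. STEP 2 and STEP 3 only come into play when some duplicate cannot be resolved that way.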
diff --git a/src/backend/optimizer/geqo/geqo_pool.c b/src/backend/optimizer/geqo/geqo_pool.c
new file mode 100644
index 0000000..0dfef1f
--- /dev/null
+++ b/src/backend/optimizer/geqo/geqo_pool.c
@@ -0,0 +1,265 @@
+/*------------------------------------------------------------------------
+ *
+ * geqo_pool.c
+ * Genetic Algorithm (GA) pool stuff
+ *
+ * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * src/backend/optimizer/geqo/geqo_pool.c
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/* contributed by:
+ =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=
+ * Martin Utesch * Institute of Automatic Control *
+ = = University of Mining and Technology =
+ * utesch@aut.tu-freiberg.de * Freiberg, Germany *
+ =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=
+ */
+
+/* -- parts of this are adapted from D. Whitley's Genitor algorithm -- */
+
+#include "postgres.h"
+
+#include <float.h>
+#include <limits.h>
+#include <math.h>
+
+#include "optimizer/geqo_copy.h"
+#include "optimizer/geqo_pool.h"
+#include "optimizer/geqo_recombination.h"
+
+
+static int compare(const void *arg1, const void *arg2);
+
+/*
+ * alloc_pool
+ * allocates memory for GA pool
+ */
+Pool *
+alloc_pool(PlannerInfo *root, int pool_size, int string_length)
+{
+ Pool *new_pool;
+ Chromosome *chromo;
+ int i;
+
+ /* pool */
+ new_pool = (Pool *) palloc(sizeof(Pool));
+ new_pool->size = (int) pool_size;
+ new_pool->string_length = (int) string_length;
+
+ /* all chromosome */
+ new_pool->data = (Chromosome *) palloc(pool_size * sizeof(Chromosome));
+
+ /* all gene */
+ chromo = (Chromosome *) new_pool->data; /* vector of all chromos */
+ for (i = 0; i < pool_size; i++)
+ chromo[i].string = palloc((string_length + 1) * sizeof(Gene));
+
+ return new_pool;
+}
+
+/*
+ * free_pool
+ * deallocates memory for GA pool
+ */
+void
+free_pool(PlannerInfo *root, Pool *pool)
+{
+ Chromosome *chromo;
+ int i;
+
+ /* all gene */
+ chromo = (Chromosome *) pool->data; /* vector of all chromos */
+ for (i = 0; i < pool->size; i++)
+ pfree(chromo[i].string);
+
+ /* all chromosome */
+ pfree(pool->data);
+
+ /* pool */
+ pfree(pool);
+}
+
+/*
+ * random_init_pool
+ * initialize genetic pool
+ */
+void
+random_init_pool(PlannerInfo *root, Pool *pool)
+{
+ Chromosome *chromo = (Chromosome *) pool->data;
+ int i;
+ int bad = 0;
+
+ /*
+ * We immediately discard any invalid individuals (those that geqo_eval
+ * returns DBL_MAX for), thereby not wasting pool space on them.
+ *
+ * If we fail to make any valid individuals after 10000 tries, give up;
+ * this probably means something is broken, and we shouldn't just let
+ * ourselves get stuck in an infinite loop.
+ */
+ i = 0;
+ while (i < pool->size)
+ {
+ init_tour(root, chromo[i].string, pool->string_length);
+ pool->data[i].worth = geqo_eval(root, chromo[i].string,
+ pool->string_length);
+ if (pool->data[i].worth < DBL_MAX)
+ i++;
+ else
+ {
+ bad++;
+ if (i == 0 && bad >= 10000)
+ elog(ERROR, "geqo failed to make a valid plan");
+ }
+ }
+
+#ifdef GEQO_DEBUG
+ if (bad > 0)
+ elog(DEBUG1, "%d invalid tours found while selecting %d pool entries",
+ bad, pool->size);
+#endif
+}
+
+/*
+ * sort_pool
+ * sorts input pool according to worth, from smallest to largest
+ *
+ * maybe you have to change compare() for different ordering ...
+ */
+void
+sort_pool(PlannerInfo *root, Pool *pool)
+{
+ qsort(pool->data, pool->size, sizeof(Chromosome), compare);
+}
+
+/*
+ * compare
+ * qsort comparison function for sort_pool
+ */
+static int
+compare(const void *arg1, const void *arg2)
+{
+ const Chromosome *chromo1 = (const Chromosome *) arg1;
+ const Chromosome *chromo2 = (const Chromosome *) arg2;
+
+ if (chromo1->worth == chromo2->worth)
+ return 0;
+ else if (chromo1->worth > chromo2->worth)
+ return 1;
+ else
+ return -1;
+}
+
+/* alloc_chromo
+ * allocates a chromosome and string space
+ */
+Chromosome *
+alloc_chromo(PlannerInfo *root, int string_length)
+{
+ Chromosome *chromo;
+
+ chromo = (Chromosome *) palloc(sizeof(Chromosome));
+ chromo->string = (Gene *) palloc((string_length + 1) * sizeof(Gene));
+
+ return chromo;
+}
+
+/* free_chromo
+ * deallocates a chromosome and string space
+ */
+void
+free_chromo(PlannerInfo *root, Chromosome *chromo)
+{
+ pfree(chromo->string);
+ pfree(chromo);
+}
+
+/* spread_chromo
+ * inserts a new chromosome into the pool, displacing worst gene in pool
+ * assumes best->worst = smallest->largest
+ */
+void
+spread_chromo(PlannerInfo *root, Chromosome *chromo, Pool *pool)
+{
+ int top,
+ mid,
+ bot;
+ int i,
+ index;
+ Chromosome swap_chromo,
+ tmp_chromo;
+
+ /* new chromo is so bad we can't use it */
+ if (chromo->worth > pool->data[pool->size - 1].worth)
+ return;
+
+ /* do a binary search to find the index of the new chromo */
+
+ top = 0;
+ mid = pool->size / 2;
+ bot = pool->size - 1;
+ index = -1;
+
+ while (index == -1)
+ {
+ /* these 4 cases find a new location */
+
+ if (chromo->worth <= pool->data[top].worth)
+ index = top;
+ else if (chromo->worth == pool->data[mid].worth)
+ index = mid;
+ else if (chromo->worth == pool->data[bot].worth)
+ index = bot;
+ else if (bot - top <= 1)
+ index = bot;
+
+
+ /*
+ * these 2 cases move the search indices since a new location has not
+ * yet been found.
+ */
+
+ else if (chromo->worth < pool->data[mid].worth)
+ {
+ bot = mid;
+ mid = top + ((bot - top) / 2);
+ }
+ else
+ { /* (chromo->worth > pool->data[mid].worth) */
+ top = mid;
+ mid = top + ((bot - top) / 2);
+ }
+ } /* ... while */
+
+ /* now we have index for chromo */
+
+ /*
+ * move every gene from index on down one position to make room for chromo
+ */
+
+ /*
+ * copy new gene into pool storage; always replace worst gene in pool
+ */
+
+ geqo_copy(root, &pool->data[pool->size - 1], chromo, pool->string_length);
+
+ swap_chromo.string = pool->data[pool->size - 1].string;
+ swap_chromo.worth = pool->data[pool->size - 1].worth;
+
+ for (i = index; i < pool->size; i++)
+ {
+ tmp_chromo.string = pool->data[i].string;
+ tmp_chromo.worth = pool->data[i].worth;
+
+ pool->data[i].string = swap_chromo.string;
+ pool->data[i].worth = swap_chromo.worth;
+
+ swap_chromo.string = tmp_chromo.string;
+ swap_chromo.worth = tmp_chromo.worth;
+ }
+}
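spread_chromo's combination of a binary search and a bottom-up swap loop can be hard to read, because the new chromosome is first copied over the worst slot and then rotated upward into place. The following stripped-down sketch is illustrative only and not the PostgreSQL function: spread_worth, POOLSZ, and the sample values are invented, and it shifts plain doubles rather than swapping Chromosome fields. It shows the net effect: keep a fixed-size array sorted ascending, drop the current worst entry, and slide everything from the insertion point down by one.

    #include <stdio.h>

    #define POOLSZ 8

    /*
     * spread_worth: insert val into the ascending array worth[POOLSZ],
     * discarding the largest (worst) entry, much as spread_chromo() does
     * for whole chromosomes.  Does nothing if val is worse than everything
     * already present.
     */
    static void
    spread_worth(double *worth, double val)
    {
        int     lo = 0,
                hi = POOLSZ;
        int     i;

        if (val > worth[POOLSZ - 1])
            return;             /* new entry is too bad to keep */

        /* binary search for the first slot whose value is >= val */
        while (lo < hi)
        {
            int     mid = lo + (hi - lo) / 2;

            if (worth[mid] < val)
                lo = mid + 1;
            else
                hi = mid;
        }

        /* shift the tail down one place, dropping the worst entry */
        for (i = POOLSZ - 1; i > lo; i--)
            worth[i] = worth[i - 1];
        worth[lo] = val;
    }

    int
    main(void)
    {
        double  worth[POOLSZ] = {1.0, 2.0, 3.0, 5.0, 8.0, 13.0, 21.0, 34.0};
        int     i;

        spread_worth(worth, 4.0);
        for (i = 0; i < POOLSZ; i++)
            printf("%.1f ", worth[i]);
        printf("\n");           /* prints: 1.0 2.0 3.0 4.0 5.0 8.0 13.0 21.0 */
        return 0;
    }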
diff --git a/src/backend/optimizer/geqo/geqo_px.c b/src/backend/optimizer/geqo/geqo_px.c
new file mode 100644
index 0000000..914296b
--- /dev/null
+++ b/src/backend/optimizer/geqo/geqo_px.c
@@ -0,0 +1,107 @@
+/*------------------------------------------------------------------------
+*
+* geqo_px.c
+*
+* position crossover [PX] routines;
+* PX operator according to Syswerda
+* (The Genetic Algorithms Handbook, L Davis, ed)
+*
+* src/backend/optimizer/geqo/geqo_px.c
+*
+*-------------------------------------------------------------------------
+*/
+
+/* contributed by:
+ =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=
+ * Martin Utesch * Institute of Automatic Control *
+ = = University of Mining and Technology =
+ * utesch@aut.tu-freiberg.de * Freiberg, Germany *
+ =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=
+ */
+
+/* the px algorithm is adapted from Genitor : */
+/*************************************************************/
+/* */
+/* Copyright (c) 1990 */
+/* Darrell L. Whitley */
+/* Computer Science Department */
+/* Colorado State University */
+/* */
+/* Permission is hereby granted to copy all or any part of */
+/* this program for free distribution. The author's name */
+/* and this copyright notice must be included in any copy. */
+/* */
+/*************************************************************/
+
+#include "postgres.h"
+#include "optimizer/geqo_random.h"
+#include "optimizer/geqo_recombination.h"
+
+#if defined(PX)
+
+/* px
+ *
+ * position crossover
+ */
+void
+px(PlannerInfo *root, Gene *tour1, Gene *tour2, Gene *offspring, int num_gene,
+ City * city_table)
+{
+ int num_positions;
+ int i,
+ pos,
+ tour2_index,
+ offspring_index;
+
+ /* initialize city table */
+ for (i = 1; i <= num_gene; i++)
+ city_table[i].used = 0;
+
+ /* choose random positions that will be inherited directly from parent */
+ num_positions = geqo_randint(root, 2 * num_gene / 3, num_gene / 3);
+
+ /* choose random position */
+ for (i = 0; i < num_positions; i++)
+ {
+ pos = geqo_randint(root, num_gene - 1, 0);
+
+ offspring[pos] = tour1[pos]; /* transfer cities to child */
+ city_table[(int) tour1[pos]].used = 1; /* mark city used */
+ }
+
+ tour2_index = 0;
+ offspring_index = 0;
+
+
+ /* px main part */
+
+ while (offspring_index < num_gene)
+ {
+
+ /* is the next position in offspring still unfilled? */
+ if (!city_table[(int) tour1[offspring_index]].used)
+ {
+
+ /* next city in tour2 not yet used */
+ if (!city_table[(int) tour2[tour2_index]].used)
+ {
+
+ /* inherit from tour2 */
+ offspring[offspring_index] = tour2[tour2_index];
+
+ tour2_index++;
+ offspring_index++;
+ }
+ else
+ { /* next city in tour2 has been used */
+ tour2_index++;
+ }
+ }
+ else
+ { /* next position in offspring is filled */
+ offspring_index++;
+ }
+ }
+}
+
+#endif /* defined(PX) */
diff --git a/src/backend/optimizer/geqo/geqo_random.c b/src/backend/optimizer/geqo/geqo_random.c
new file mode 100644
index 0000000..c6b7648
--- /dev/null
+++ b/src/backend/optimizer/geqo/geqo_random.c
@@ -0,0 +1,45 @@
+/*------------------------------------------------------------------------
+ *
+ * geqo_random.c
+ * random number generator
+ *
+ * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * src/backend/optimizer/geqo/geqo_random.c
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include "optimizer/geqo_random.h"
+
+
+void
+geqo_set_seed(PlannerInfo *root, double seed)
+{
+ GeqoPrivateData *private = (GeqoPrivateData *) root->join_search_private;
+
+ pg_prng_fseed(&private->random_state, seed);
+}
+
+double
+geqo_rand(PlannerInfo *root)
+{
+ GeqoPrivateData *private = (GeqoPrivateData *) root->join_search_private;
+
+ return pg_prng_double(&private->random_state);
+}
+
+int
+geqo_randint(PlannerInfo *root, int upper, int lower)
+{
+ GeqoPrivateData *private = (GeqoPrivateData *) root->join_search_private;
+
+ /*
+ * In current usage, "lower" is never negative so we can just use
+ * pg_prng_uint64_range directly.
+ */
+ return (int) pg_prng_uint64_range(&private->random_state, lower, upper);
+}
diff --git a/src/backend/optimizer/geqo/geqo_recombination.c b/src/backend/optimizer/geqo/geqo_recombination.c
new file mode 100644
index 0000000..a5d3e47
--- /dev/null
+++ b/src/backend/optimizer/geqo/geqo_recombination.c
@@ -0,0 +1,92 @@
+/*------------------------------------------------------------------------
+*
+* geqo_recombination.c
+* misc recombination procedures
+*
+* src/backend/optimizer/geqo/geqo_recombination.c
+*
+*-------------------------------------------------------------------------
+*/
+
+/* contributed by:
+ =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=
+ * Martin Utesch * Institute of Automatic Control *
+ = = University of Mining and Technology =
+ * utesch@aut.tu-freiberg.de * Freiberg, Germany *
+ =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=
+ */
+
+/* -- parts of this are adapted from D. Whitley's Genitor algorithm -- */
+
+#include "postgres.h"
+
+#include "optimizer/geqo_random.h"
+#include "optimizer/geqo_recombination.h"
+
+
+/*
+ * init_tour
+ *
+ * Randomly generates a legal "traveling salesman" tour
+ * (i.e. where each point is visited only once.)
+ */
+void
+init_tour(PlannerInfo *root, Gene *tour, int num_gene)
+{
+ int i,
+ j;
+
+ /*
+ * We must fill the tour[] array with a random permutation of the numbers
+ * 1 .. num_gene. We can do that in one pass using the "inside-out"
+ * variant of the Fisher-Yates shuffle algorithm. Notionally, we append
+ * each new value to the array and then swap it with a randomly-chosen
+ * array element (possibly including itself, else we fail to generate
+ * permutations with the last city last). The swap step can be optimized
+ * by combining it with the insertion.
+ */
+ if (num_gene > 0)
+ tour[0] = (Gene) 1;
+
+ for (i = 1; i < num_gene; i++)
+ {
+ j = geqo_randint(root, i, 0);
+ /* i != j check avoids fetching uninitialized array element */
+ if (i != j)
+ tour[i] = tour[j];
+ tour[j] = (Gene) (i + 1);
+ }
+}
+
+/* city table is used in these recombination methods: */
+#if defined(CX) || defined(PX) || defined(OX1) || defined(OX2)
+
+/* alloc_city_table
+ *
+ * allocate memory for city table
+ */
+City *
+alloc_city_table(PlannerInfo *root, int num_gene)
+{
+ City *city_table;
+
+ /*
+ * palloc one extra location so that nodes numbered 1..n can be indexed
+ * directly; 0 will not be used
+ */
+ city_table = (City *) palloc((num_gene + 1) * sizeof(City));
+
+ return city_table;
+}
+
+/* free_city_table
+ *
+ * deallocate memory of city table
+ */
+void
+free_city_table(PlannerInfo *root, City * city_table)
+{
+ pfree(city_table);
+}
+
+#endif /* CX || PX || OX1 || OX2 */
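The "inside-out" shuffle that init_tour's comment describes is a standard construction, and a standalone sketch may make it concrete. This is not the planner code: demo_init_tour is an invented name, and rand() merely stands in for geqo_randint(), so it keeps the (slightly biased) modulo draw for brevity. Each new value i + 1 is notionally appended at slot i and then swapped with a uniformly chosen slot j in 0..i, with the swap folded into the assignment exactly as in init_tour.

    #include <stdio.h>
    #include <stdlib.h>

    /*
     * demo_init_tour: fill tour[0..n-1] with a random permutation of 1..n
     * using the inside-out Fisher-Yates variant.  rand() is used here
     * purely for illustration.
     */
    static void
    demo_init_tour(int *tour, int n)
    {
        int     i, j;

        if (n > 0)
            tour[0] = 1;
        for (i = 1; i < n; i++)
        {
            j = rand() % (i + 1);   /* uniform choice in 0..i, including i itself */
            if (i != j)
                tour[i] = tour[j];  /* move the displaced value out of slot j */
            tour[j] = i + 1;        /* "append" the new value, already swapped */
        }
    }

    int
    main(void)
    {
        int     tour[8];
        int     i;

        srand(42);
        demo_init_tour(tour, 8);
        for (i = 0; i < 8; i++)
            printf("%d ", tour[i]); /* some permutation of 1..8 */
        printf("\n");
        return 0;
    }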
diff --git a/src/backend/optimizer/geqo/geqo_selection.c b/src/backend/optimizer/geqo/geqo_selection.c
new file mode 100644
index 0000000..50f678a
--- /dev/null
+++ b/src/backend/optimizer/geqo/geqo_selection.c
@@ -0,0 +1,111 @@
+/*-------------------------------------------------------------------------
+ *
+ * geqo_selection.c
+ * linear selection scheme for the genetic query optimizer
+ *
+ * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * src/backend/optimizer/geqo/geqo_selection.c
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/* contributed by:
+ =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=
+ * Martin Utesch * Institute of Automatic Control *
+ = = University of Mining and Technology =
+ * utesch@aut.tu-freiberg.de * Freiberg, Germany *
+ =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=
+ */
+
+/* this is adapted from D. Whitley's Genitor algorithm */
+
+/*************************************************************/
+/* */
+/* Copyright (c) 1990 */
+/* Darrell L. Whitley */
+/* Computer Science Department */
+/* Colorado State University */
+/* */
+/* Permission is hereby granted to copy all or any part of */
+/* this program for free distribution. The author's name */
+/* and this copyright notice must be included in any copy. */
+/* */
+/*************************************************************/
+
+#include "postgres.h"
+
+#include <math.h>
+
+#include "optimizer/geqo_copy.h"
+#include "optimizer/geqo_random.h"
+#include "optimizer/geqo_selection.h"
+
+static int linear_rand(PlannerInfo *root, int max, double bias);
+
+
+/*
+ * geqo_selection
+ * according to bias described by input parameters,
+ * first and second genes are selected from the pool
+ */
+void
+geqo_selection(PlannerInfo *root, Chromosome *momma, Chromosome *daddy,
+ Pool *pool, double bias)
+{
+ int first,
+ second;
+
+ first = linear_rand(root, pool->size, bias);
+ second = linear_rand(root, pool->size, bias);
+
+ /*
+ * Ensure we have selected different genes, except if pool size is only
+ * one, when we can't.
+ */
+ if (pool->size > 1)
+ {
+ while (first == second)
+ second = linear_rand(root, pool->size, bias);
+ }
+
+ geqo_copy(root, momma, &pool->data[first], pool->string_length);
+ geqo_copy(root, daddy, &pool->data[second], pool->string_length);
+}
+
+/*
+ * linear_rand
+ * generates random integer between 0 and input max number
+ * using input linear bias
+ *
+ * bias is y-intercept of linear distribution
+ *
+ * probability distribution function is: f(x) = bias - 2(bias - 1)x
+ * bias = (prob of first rule) / (prob of middle rule)
+ */
+static int
+linear_rand(PlannerInfo *root, int pool_size, double bias)
+{
+ double index; /* index between 0 and pool_size */
+ double max = (double) pool_size;
+
+ /*
+ * geqo_rand() is not supposed to return 1.0, but if it does then we will
+ * get exactly max from this equation, whereas we need 0 <= index < max.
+ * Also it seems possible that roundoff error might deliver values
+ * slightly outside the range; in particular avoid passing a value
+ * slightly less than 0 to sqrt(). If we get a bad value just try again.
+ */
+ do
+ {
+ double sqrtval;
+
+ sqrtval = (bias * bias) - 4.0 * (bias - 1.0) * geqo_rand(root);
+ if (sqrtval > 0.0)
+ sqrtval = sqrt(sqrtval);
+ index = max * (bias - sqrtval) / 2.0 / (bias - 1.0);
+ } while (index < 0.0 || index >= max);
+
+ return (int) index;
+}
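For reference, the expression inside the loop is just the inverse CDF of the linear density quoted in the comment. With f(x) = bias - 2(bias - 1)x on [0, 1), the cumulative distribution is F(x) = bias*x - (bias - 1)*x^2; setting F(x) = r for a uniform r in [0, 1) and taking the root of the quadratic that lies in [0, 1) gives

    x = (bias - sqrt(bias*bias - 4(bias - 1)r)) / (2(bias - 1))

which, scaled by the pool size, is exactly what the code computes. Since the pool is kept sorted with the cheapest plans first, a larger bias makes low indices (better chromosomes) proportionally more likely to be selected.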
diff --git a/src/backend/optimizer/path/Makefile b/src/backend/optimizer/path/Makefile
new file mode 100644
index 0000000..1e199ff
--- /dev/null
+++ b/src/backend/optimizer/path/Makefile
@@ -0,0 +1,26 @@
+#-------------------------------------------------------------------------
+#
+# Makefile--
+# Makefile for optimizer/path
+#
+# IDENTIFICATION
+# src/backend/optimizer/path/Makefile
+#
+#-------------------------------------------------------------------------
+
+subdir = src/backend/optimizer/path
+top_builddir = ../../../..
+include $(top_builddir)/src/Makefile.global
+
+OBJS = \
+ allpaths.o \
+ clausesel.o \
+ costsize.o \
+ equivclass.o \
+ indxpath.o \
+ joinpath.o \
+ joinrels.o \
+ pathkeys.o \
+ tidpath.o
+
+include $(top_srcdir)/src/backend/common.mk
diff --git a/src/backend/optimizer/path/allpaths.c b/src/backend/optimizer/path/allpaths.c
new file mode 100644
index 0000000..4e02439
--- /dev/null
+++ b/src/backend/optimizer/path/allpaths.c
@@ -0,0 +1,4668 @@
+/*-------------------------------------------------------------------------
+ *
+ * allpaths.c
+ * Routines to find possible search paths for processing a query
+ *
+ * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ * src/backend/optimizer/path/allpaths.c
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include <limits.h>
+#include <math.h>
+
+#include "access/sysattr.h"
+#include "access/tsmapi.h"
+#include "catalog/pg_class.h"
+#include "catalog/pg_operator.h"
+#include "catalog/pg_proc.h"
+#include "foreign/fdwapi.h"
+#include "miscadmin.h"
+#include "nodes/makefuncs.h"
+#include "nodes/nodeFuncs.h"
+#include "nodes/supportnodes.h"
+#ifdef OPTIMIZER_DEBUG
+#include "nodes/print.h"
+#endif
+#include "optimizer/appendinfo.h"
+#include "optimizer/clauses.h"
+#include "optimizer/cost.h"
+#include "optimizer/geqo.h"
+#include "optimizer/inherit.h"
+#include "optimizer/optimizer.h"
+#include "optimizer/pathnode.h"
+#include "optimizer/paths.h"
+#include "optimizer/plancat.h"
+#include "optimizer/planner.h"
+#include "optimizer/restrictinfo.h"
+#include "optimizer/tlist.h"
+#include "parser/parse_clause.h"
+#include "parser/parsetree.h"
+#include "partitioning/partbounds.h"
+#include "partitioning/partprune.h"
+#include "rewrite/rewriteManip.h"
+#include "utils/lsyscache.h"
+
+
+/* Bitmask flags for pushdown_safety_info.unsafeFlags */
+#define UNSAFE_HAS_VOLATILE_FUNC (1 << 0)
+#define UNSAFE_HAS_SET_FUNC (1 << 1)
+#define UNSAFE_NOTIN_DISTINCTON_CLAUSE (1 << 2)
+#define UNSAFE_NOTIN_PARTITIONBY_CLAUSE (1 << 3)
+#define UNSAFE_TYPE_MISMATCH (1 << 4)
+
+/* results of subquery_is_pushdown_safe */
+typedef struct pushdown_safety_info
+{
+ unsigned char *unsafeFlags; /* bitmask of reasons why this target list
+ * column is unsafe for qual pushdown, or 0 if
+ * no reason. */
+ bool unsafeVolatile; /* don't push down volatile quals */
+ bool unsafeLeaky; /* don't push down leaky quals */
+} pushdown_safety_info;
+
+/* Return type for qual_is_pushdown_safe */
+typedef enum pushdown_safe_type
+{
+ PUSHDOWN_UNSAFE, /* unsafe to push qual into subquery */
+ PUSHDOWN_SAFE, /* safe to push qual into subquery */
+ PUSHDOWN_WINDOWCLAUSE_RUNCOND /* unsafe, but may work as WindowClause
+ * run condition */
+} pushdown_safe_type;
+
+/* These parameters are set by GUC */
+bool enable_geqo = false; /* just in case GUC doesn't set it */
+int geqo_threshold;
+int min_parallel_table_scan_size;
+int min_parallel_index_scan_size;
+
+/* Hook for plugins to get control in set_rel_pathlist() */
+set_rel_pathlist_hook_type set_rel_pathlist_hook = NULL;
+
+/* Hook for plugins to replace standard_join_search() */
+join_search_hook_type join_search_hook = NULL;
+
+
+static void set_base_rel_consider_startup(PlannerInfo *root);
+static void set_base_rel_sizes(PlannerInfo *root);
+static void set_base_rel_pathlists(PlannerInfo *root);
+static void set_rel_size(PlannerInfo *root, RelOptInfo *rel,
+ Index rti, RangeTblEntry *rte);
+static void set_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
+ Index rti, RangeTblEntry *rte);
+static void set_plain_rel_size(PlannerInfo *root, RelOptInfo *rel,
+ RangeTblEntry *rte);
+static void create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel);
+static void set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
+ RangeTblEntry *rte);
+static void set_plain_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
+ RangeTblEntry *rte);
+static void set_tablesample_rel_size(PlannerInfo *root, RelOptInfo *rel,
+ RangeTblEntry *rte);
+static void set_tablesample_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
+ RangeTblEntry *rte);
+static void set_foreign_size(PlannerInfo *root, RelOptInfo *rel,
+ RangeTblEntry *rte);
+static void set_foreign_pathlist(PlannerInfo *root, RelOptInfo *rel,
+ RangeTblEntry *rte);
+static void set_append_rel_size(PlannerInfo *root, RelOptInfo *rel,
+ Index rti, RangeTblEntry *rte);
+static void set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
+ Index rti, RangeTblEntry *rte);
+static void generate_orderedappend_paths(PlannerInfo *root, RelOptInfo *rel,
+ List *live_childrels,
+ List *all_child_pathkeys);
+static Path *get_cheapest_parameterized_child_path(PlannerInfo *root,
+ RelOptInfo *rel,
+ Relids required_outer);
+static void accumulate_append_subpath(Path *path,
+ List **subpaths,
+ List **special_subpaths);
+static Path *get_singleton_append_subpath(Path *path);
+static void set_dummy_rel_pathlist(RelOptInfo *rel);
+static void set_subquery_pathlist(PlannerInfo *root, RelOptInfo *rel,
+ Index rti, RangeTblEntry *rte);
+static void set_function_pathlist(PlannerInfo *root, RelOptInfo *rel,
+ RangeTblEntry *rte);
+static void set_values_pathlist(PlannerInfo *root, RelOptInfo *rel,
+ RangeTblEntry *rte);
+static void set_tablefunc_pathlist(PlannerInfo *root, RelOptInfo *rel,
+ RangeTblEntry *rte);
+static void set_cte_pathlist(PlannerInfo *root, RelOptInfo *rel,
+ RangeTblEntry *rte);
+static void set_namedtuplestore_pathlist(PlannerInfo *root, RelOptInfo *rel,
+ RangeTblEntry *rte);
+static void set_result_pathlist(PlannerInfo *root, RelOptInfo *rel,
+ RangeTblEntry *rte);
+static void set_worktable_pathlist(PlannerInfo *root, RelOptInfo *rel,
+ RangeTblEntry *rte);
+static RelOptInfo *make_rel_from_joinlist(PlannerInfo *root, List *joinlist);
+static bool subquery_is_pushdown_safe(Query *subquery, Query *topquery,
+ pushdown_safety_info *safetyInfo);
+static bool recurse_pushdown_safe(Node *setOp, Query *topquery,
+ pushdown_safety_info *safetyInfo);
+static void check_output_expressions(Query *subquery,
+ pushdown_safety_info *safetyInfo);
+static void compare_tlist_datatypes(List *tlist, List *colTypes,
+ pushdown_safety_info *safetyInfo);
+static bool targetIsInAllPartitionLists(TargetEntry *tle, Query *query);
+static pushdown_safe_type qual_is_pushdown_safe(Query *subquery, Index rti,
+ RestrictInfo *rinfo,
+ pushdown_safety_info *safetyInfo);
+static void subquery_push_qual(Query *subquery,
+ RangeTblEntry *rte, Index rti, Node *qual);
+static void recurse_push_qual(Node *setOp, Query *topquery,
+ RangeTblEntry *rte, Index rti, Node *qual);
+static void remove_unused_subquery_outputs(Query *subquery, RelOptInfo *rel,
+ Bitmapset *extra_used_attrs);
+
+
+/*
+ * make_one_rel
+ * Finds all possible access paths for executing a query, returning a
+ * single rel that represents the join of all base rels in the query.
+ */
+RelOptInfo *
+make_one_rel(PlannerInfo *root, List *joinlist)
+{
+ RelOptInfo *rel;
+ Index rti;
+ double total_pages;
+
+ /*
+ * Construct the all_baserels Relids set.
+ */
+ root->all_baserels = NULL;
+ for (rti = 1; rti < root->simple_rel_array_size; rti++)
+ {
+ RelOptInfo *brel = root->simple_rel_array[rti];
+
+ /* there may be empty slots corresponding to non-baserel RTEs */
+ if (brel == NULL)
+ continue;
+
+ Assert(brel->relid == rti); /* sanity check on array */
+
+ /* ignore RTEs that are "other rels" */
+ if (brel->reloptkind != RELOPT_BASEREL)
+ continue;
+
+ root->all_baserels = bms_add_member(root->all_baserels, brel->relid);
+ }
+
+ /* Mark base rels as to whether we care about fast-start plans */
+ set_base_rel_consider_startup(root);
+
+ /*
+ * Compute size estimates and consider_parallel flags for each base rel.
+ */
+ set_base_rel_sizes(root);
+
+ /*
+ * We should now have size estimates for every actual table involved in
+ * the query, and we also know which if any have been deleted from the
+ * query by join removal, pruned by partition pruning, or eliminated by
+ * constraint exclusion. So we can now compute total_table_pages.
+ *
+ * Note that appendrels are not double-counted here, even though we don't
+ * bother to distinguish RelOptInfos for appendrel parents, because the
+ * parents will have pages = 0.
+ *
+ * XXX if a table is self-joined, we will count it once per appearance,
+ * which perhaps is the wrong thing ... but that's not completely clear,
+ * and detecting self-joins here is difficult, so ignore it for now.
+ */
+ total_pages = 0;
+ for (rti = 1; rti < root->simple_rel_array_size; rti++)
+ {
+ RelOptInfo *brel = root->simple_rel_array[rti];
+
+ if (brel == NULL)
+ continue;
+
+ Assert(brel->relid == rti); /* sanity check on array */
+
+ if (IS_DUMMY_REL(brel))
+ continue;
+
+ if (IS_SIMPLE_REL(brel))
+ total_pages += (double) brel->pages;
+ }
+ root->total_table_pages = total_pages;
+
+ /*
+ * Generate access paths for each base rel.
+ */
+ set_base_rel_pathlists(root);
+
+ /*
+ * Generate access paths for the entire join tree.
+ */
+ rel = make_rel_from_joinlist(root, joinlist);
+
+ /*
+ * The result should join all and only the query's base rels.
+ */
+ Assert(bms_equal(rel->relids, root->all_baserels));
+
+ return rel;
+}
+
+/*
+ * set_base_rel_consider_startup
+ * Set the consider_[param_]startup flags for each base-relation entry.
+ *
+ * For the moment, we only deal with consider_param_startup here; because the
+ * logic for consider_startup is pretty trivial and is the same for every base
+ * relation, we just let build_simple_rel() initialize that flag correctly to
+ * start with. If that logic ever gets more complicated it would probably
+ * be better to move it here.
+ */
+static void
+set_base_rel_consider_startup(PlannerInfo *root)
+{
+ /*
+ * Since parameterized paths can only be used on the inside of a nestloop
+ * join plan, there is usually little value in considering fast-start
+ * plans for them. However, for relations that are on the RHS of a SEMI
+ * or ANTI join, a fast-start plan can be useful because we're only going
+ * to care about fetching one tuple anyway.
+ *
+ * To minimize growth of planning time, we currently restrict this to
+ * cases where the RHS is a single base relation, not a join; there is no
+ * provision for consider_param_startup to get set at all on joinrels.
+ * Also we don't worry about appendrels. costsize.c's costing rules for
+ * nestloop semi/antijoins don't consider such cases either.
+ */
+ ListCell *lc;
+
+ foreach(lc, root->join_info_list)
+ {
+ SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) lfirst(lc);
+ int varno;
+
+ if ((sjinfo->jointype == JOIN_SEMI || sjinfo->jointype == JOIN_ANTI) &&
+ bms_get_singleton_member(sjinfo->syn_righthand, &varno))
+ {
+ RelOptInfo *rel = find_base_rel(root, varno);
+
+ rel->consider_param_startup = true;
+ }
+ }
+}
+
+/*
+ * set_base_rel_sizes
+ * Set the size estimates (rows and widths) for each base-relation entry.
+ * Also determine whether to consider parallel paths for base relations.
+ *
+ * We do this in a separate pass over the base rels so that rowcount
+ * estimates are available for parameterized path generation, and also so
+ * that each rel's consider_parallel flag is set correctly before we begin to
+ * generate paths.
+ */
+static void
+set_base_rel_sizes(PlannerInfo *root)
+{
+ Index rti;
+
+ for (rti = 1; rti < root->simple_rel_array_size; rti++)
+ {
+ RelOptInfo *rel = root->simple_rel_array[rti];
+ RangeTblEntry *rte;
+
+ /* there may be empty slots corresponding to non-baserel RTEs */
+ if (rel == NULL)
+ continue;
+
+ Assert(rel->relid == rti); /* sanity check on array */
+
+ /* ignore RTEs that are "other rels" */
+ if (rel->reloptkind != RELOPT_BASEREL)
+ continue;
+
+ rte = root->simple_rte_array[rti];
+
+ /*
+ * If parallelism is allowable for this query in general, see whether
+ * it's allowable for this rel in particular. We have to do this
+ * before set_rel_size(), because (a) if this rel is an inheritance
+ * parent, set_append_rel_size() will use and perhaps change the rel's
+ * consider_parallel flag, and (b) for some RTE types, set_rel_size()
+ * goes ahead and makes paths immediately.
+ */
+ if (root->glob->parallelModeOK)
+ set_rel_consider_parallel(root, rel, rte);
+
+ set_rel_size(root, rel, rti, rte);
+ }
+}
+
+/*
+ * set_base_rel_pathlists
+ * Finds all paths available for scanning each base-relation entry.
+ * Sequential scan and any available indices are considered.
+ * Each useful path is attached to its relation's 'pathlist' field.
+ */
+static void
+set_base_rel_pathlists(PlannerInfo *root)
+{
+ Index rti;
+
+ for (rti = 1; rti < root->simple_rel_array_size; rti++)
+ {
+ RelOptInfo *rel = root->simple_rel_array[rti];
+
+ /* there may be empty slots corresponding to non-baserel RTEs */
+ if (rel == NULL)
+ continue;
+
+ Assert(rel->relid == rti); /* sanity check on array */
+
+ /* ignore RTEs that are "other rels" */
+ if (rel->reloptkind != RELOPT_BASEREL)
+ continue;
+
+ set_rel_pathlist(root, rel, rti, root->simple_rte_array[rti]);
+ }
+}
+
+/*
+ * set_rel_size
+ * Set size estimates for a base relation
+ */
+static void
+set_rel_size(PlannerInfo *root, RelOptInfo *rel,
+ Index rti, RangeTblEntry *rte)
+{
+ if (rel->reloptkind == RELOPT_BASEREL &&
+ relation_excluded_by_constraints(root, rel, rte))
+ {
+ /*
+ * We proved we don't need to scan the rel via constraint exclusion,
+ * so set up a single dummy path for it. Here we only check this for
+ * regular baserels; if it's an otherrel, CE was already checked in
+ * set_append_rel_size().
+ *
+ * In this case, we go ahead and set up the relation's path right away
+ * instead of leaving it for set_rel_pathlist to do. This is because
+ * we don't have a convention for marking a rel as dummy except by
+ * assigning a dummy path to it.
+ */
+ set_dummy_rel_pathlist(rel);
+ }
+ else if (rte->inh)
+ {
+ /* It's an "append relation", process accordingly */
+ set_append_rel_size(root, rel, rti, rte);
+ }
+ else
+ {
+ switch (rel->rtekind)
+ {
+ case RTE_RELATION:
+ if (rte->relkind == RELKIND_FOREIGN_TABLE)
+ {
+ /* Foreign table */
+ set_foreign_size(root, rel, rte);
+ }
+ else if (rte->relkind == RELKIND_PARTITIONED_TABLE)
+ {
+ /*
+ * We could get here if asked to scan a partitioned table
+ * with ONLY. In that case we shouldn't scan any of the
+ * partitions, so mark it as a dummy rel.
+ */
+ set_dummy_rel_pathlist(rel);
+ }
+ else if (rte->tablesample != NULL)
+ {
+ /* Sampled relation */
+ set_tablesample_rel_size(root, rel, rte);
+ }
+ else
+ {
+ /* Plain relation */
+ set_plain_rel_size(root, rel, rte);
+ }
+ break;
+ case RTE_SUBQUERY:
+
+ /*
+ * Subqueries don't support making a choice between
+ * parameterized and unparameterized paths, so just go ahead
+ * and build their paths immediately.
+ */
+ set_subquery_pathlist(root, rel, rti, rte);
+ break;
+ case RTE_FUNCTION:
+ set_function_size_estimates(root, rel);
+ break;
+ case RTE_TABLEFUNC:
+ set_tablefunc_size_estimates(root, rel);
+ break;
+ case RTE_VALUES:
+ set_values_size_estimates(root, rel);
+ break;
+ case RTE_CTE:
+
+ /*
+ * CTEs don't support making a choice between parameterized
+ * and unparameterized paths, so just go ahead and build their
+ * paths immediately.
+ */
+ if (rte->self_reference)
+ set_worktable_pathlist(root, rel, rte);
+ else
+ set_cte_pathlist(root, rel, rte);
+ break;
+ case RTE_NAMEDTUPLESTORE:
+ /* Might as well just build the path immediately */
+ set_namedtuplestore_pathlist(root, rel, rte);
+ break;
+ case RTE_RESULT:
+ /* Might as well just build the path immediately */
+ set_result_pathlist(root, rel, rte);
+ break;
+ default:
+ elog(ERROR, "unexpected rtekind: %d", (int) rel->rtekind);
+ break;
+ }
+ }
+
+ /*
+ * We insist that all non-dummy rels have a nonzero rowcount estimate.
+ */
+ Assert(rel->rows > 0 || IS_DUMMY_REL(rel));
+}
+
+/*
+ * set_rel_pathlist
+ * Build access paths for a base relation
+ */
+static void
+set_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
+ Index rti, RangeTblEntry *rte)
+{
+ if (IS_DUMMY_REL(rel))
+ {
+ /* We already proved the relation empty, so nothing more to do */
+ }
+ else if (rte->inh)
+ {
+ /* It's an "append relation", process accordingly */
+ set_append_rel_pathlist(root, rel, rti, rte);
+ }
+ else
+ {
+ switch (rel->rtekind)
+ {
+ case RTE_RELATION:
+ if (rte->relkind == RELKIND_FOREIGN_TABLE)
+ {
+ /* Foreign table */
+ set_foreign_pathlist(root, rel, rte);
+ }
+ else if (rte->tablesample != NULL)
+ {
+ /* Sampled relation */
+ set_tablesample_rel_pathlist(root, rel, rte);
+ }
+ else
+ {
+ /* Plain relation */
+ set_plain_rel_pathlist(root, rel, rte);
+ }
+ break;
+ case RTE_SUBQUERY:
+ /* Subquery --- fully handled during set_rel_size */
+ break;
+ case RTE_FUNCTION:
+ /* RangeFunction */
+ set_function_pathlist(root, rel, rte);
+ break;
+ case RTE_TABLEFUNC:
+ /* Table Function */
+ set_tablefunc_pathlist(root, rel, rte);
+ break;
+ case RTE_VALUES:
+ /* Values list */
+ set_values_pathlist(root, rel, rte);
+ break;
+ case RTE_CTE:
+ /* CTE reference --- fully handled during set_rel_size */
+ break;
+ case RTE_NAMEDTUPLESTORE:
+ /* tuplestore reference --- fully handled during set_rel_size */
+ break;
+ case RTE_RESULT:
+ /* simple Result --- fully handled during set_rel_size */
+ break;
+ default:
+ elog(ERROR, "unexpected rtekind: %d", (int) rel->rtekind);
+ break;
+ }
+ }
+
+ /*
+ * Allow a plugin to editorialize on the set of Paths for this base
+ * relation. It could add new paths (such as CustomPaths) by calling
+ * add_path(), or add_partial_path() if parallel aware. It could also
+ * delete or modify paths added by the core code.
+ */
+ if (set_rel_pathlist_hook)
+ (*set_rel_pathlist_hook) (root, rel, rti, rte);
+
+ /*
+ * If this is a baserel, we should normally consider gathering any partial
+ * paths we may have created for it. We have to do this after calling the
+ * set_rel_pathlist_hook, else it cannot add partial paths to be included
+ * here.
+ *
+ * However, if this is an inheritance child, skip it. Otherwise, we could
+ * end up with a very large number of gather nodes, each trying to grab
+ * its own pool of workers. Instead, we'll consider gathering partial
+ * paths for the parent appendrel.
+ *
+ * Also, if this is the topmost scan/join rel, we postpone gathering until
+ * the final scan/join targetlist is available (see grouping_planner).
+ */
+ if (rel->reloptkind == RELOPT_BASEREL &&
+ !bms_equal(rel->relids, root->all_baserels))
+ generate_useful_gather_paths(root, rel, false);
+
+ /* Now find the cheapest of the paths for this rel */
+ set_cheapest(rel);
+
+#ifdef OPTIMIZER_DEBUG
+ debug_print_rel(root, rel);
+#endif
+}
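The hook call above is the extension point the comment describes, and a minimal skeleton may help show what "editorializing on the set of Paths" looks like from a plugin's side. This is only a sketch under the usual extension conventions, not part of this patch: the function and variable names are invented, and a real provider would add or remove paths rather than merely log.

    /* hypothetical extension sketch -- not part of this patch */
    #include "postgres.h"

    #include "fmgr.h"
    #include "optimizer/paths.h"

    PG_MODULE_MAGIC;

    static set_rel_pathlist_hook_type prev_set_rel_pathlist_hook = NULL;

    /*
     * Runs for every base relation after the core paths exist but before
     * partial paths are gathered and set_cheapest() runs, so anything
     * added here still competes on equal footing.
     */
    static void
    demo_rel_pathlist_hook(PlannerInfo *root, RelOptInfo *rel,
                           Index rti, RangeTblEntry *rte)
    {
        /* chain to any previously installed hook first */
        if (prev_set_rel_pathlist_hook)
            prev_set_rel_pathlist_hook(root, rel, rti, rte);

        /*
         * A real provider would call add_path()/add_partial_path() here,
         * or prune rel->pathlist; this sketch only observes.
         */
        elog(DEBUG1, "building paths for base relation %u", rti);
    }

    void
    _PG_init(void)
    {
        prev_set_rel_pathlist_hook = set_rel_pathlist_hook;
        set_rel_pathlist_hook = demo_rel_pathlist_hook;
    }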
+
+/*
+ * set_plain_rel_size
+ * Set size estimates for a plain relation (no subquery, no inheritance)
+ */
+static void
+set_plain_rel_size(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
+{
+ /*
+ * Test any partial indexes of rel for applicability. We must do this
+ * first since partial unique indexes can affect size estimates.
+ */
+ check_index_predicates(root, rel);
+
+ /* Mark rel with estimated output rows, width, etc */
+ set_baserel_size_estimates(root, rel);
+}
+
+/*
+ * If this relation could possibly be scanned from within a worker, then set
+ * its consider_parallel flag.
+ */
+static void
+set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
+ RangeTblEntry *rte)
+{
+ /*
+ * The flag has previously been initialized to false, so we can just
+ * return if it becomes clear that we can't safely set it.
+ */
+ Assert(!rel->consider_parallel);
+
+ /* Don't call this if parallelism is disallowed for the entire query. */
+ Assert(root->glob->parallelModeOK);
+
+ /* This should only be called for baserels and appendrel children. */
+ Assert(IS_SIMPLE_REL(rel));
+
+ /* Assorted checks based on rtekind. */
+ switch (rte->rtekind)
+ {
+ case RTE_RELATION:
+
+ /*
+ * Currently, parallel workers can't access the leader's temporary
+ * tables. We could possibly relax this if we wrote all of its
+ * local buffers at the start of the query and made no changes
+ * thereafter (maybe we could allow hint bit changes), and if we
+ * taught the workers to read them. Writing a large number of
+ * temporary buffers could be expensive, though, and we don't have
+ * the rest of the necessary infrastructure right now anyway. So
+ * for now, bail out if we see a temporary table.
+ */
+ if (get_rel_persistence(rte->relid) == RELPERSISTENCE_TEMP)
+ return;
+
+ /*
+ * Table sampling can be pushed down to workers if the sample
+ * function and its arguments are safe.
+ */
+ if (rte->tablesample != NULL)
+ {
+ char proparallel = func_parallel(rte->tablesample->tsmhandler);
+
+ if (proparallel != PROPARALLEL_SAFE)
+ return;
+ if (!is_parallel_safe(root, (Node *) rte->tablesample->args))
+ return;
+ }
+
+ /*
+ * Ask FDWs whether they can support performing a ForeignScan
+ * within a worker. Most often, the answer will be no. For
+ * example, if the nature of the FDW is such that it opens a TCP
+ * connection with a remote server, each parallel worker would end
+ * up with a separate connection, and these connections might not
+ * be appropriately coordinated between workers and the leader.
+ */
+ if (rte->relkind == RELKIND_FOREIGN_TABLE)
+ {
+ Assert(rel->fdwroutine);
+ if (!rel->fdwroutine->IsForeignScanParallelSafe)
+ return;
+ if (!rel->fdwroutine->IsForeignScanParallelSafe(root, rel, rte))
+ return;
+ }
+
+ /*
+ * There are additional considerations for appendrels, which we'll
+ * deal with in set_append_rel_size and set_append_rel_pathlist.
+ * For now, just set consider_parallel based on the rel's own
+ * quals and targetlist.
+ */
+ break;
+
+ case RTE_SUBQUERY:
+
+ /*
+ * There's no intrinsic problem with scanning a subquery-in-FROM
+ * (as distinct from a SubPlan or InitPlan) in a parallel worker.
+ * If the subquery doesn't happen to have any parallel-safe paths,
+ * then flagging it as consider_parallel won't change anything,
+ * but that's true for plain tables, too. We must set
+ * consider_parallel based on the rel's own quals and targetlist,
+ * so that if a subquery path is parallel-safe but the quals and
+ * projection we're sticking onto it are not, we correctly mark
+ * the SubqueryScanPath as not parallel-safe. (Note that
+ * set_subquery_pathlist() might push some of these quals down
+ * into the subquery itself, but that doesn't change anything.)
+ *
+ * We can't push sub-select containing LIMIT/OFFSET to workers as
+ * there is no guarantee that the row order will be fully
+ * deterministic, and applying LIMIT/OFFSET will lead to
+ * inconsistent results at the top-level. (In some cases, where
+ * the result is ordered, we could relax this restriction. But it
+ * doesn't currently seem worth expending extra effort to do so.)
+ */
+ {
+ Query *subquery = castNode(Query, rte->subquery);
+
+ if (limit_needed(subquery))
+ return;
+ }
+ break;
+
+ case RTE_JOIN:
+ /* Shouldn't happen; we're only considering baserels here. */
+ Assert(false);
+ return;
+
+ case RTE_FUNCTION:
+ /* Check for parallel-restricted functions. */
+ if (!is_parallel_safe(root, (Node *) rte->functions))
+ return;
+ break;
+
+ case RTE_TABLEFUNC:
+ /* not parallel safe */
+ return;
+
+ case RTE_VALUES:
+ /* Check for parallel-restricted functions. */
+ if (!is_parallel_safe(root, (Node *) rte->values_lists))
+ return;
+ break;
+
+ case RTE_CTE:
+
+ /*
+ * CTE tuplestores aren't shared among parallel workers, so we
+ * force all CTE scans to happen in the leader. Also, populating
+ * the CTE would require executing a subplan that's not available
+ * in the worker, might be parallel-restricted, and must get
+ * executed only once.
+ */
+ return;
+
+ case RTE_NAMEDTUPLESTORE:
+
+ /*
+ * tuplestore cannot be shared, at least without more
+ * infrastructure to support that.
+ */
+ return;
+
+ case RTE_RESULT:
+ /* RESULT RTEs, in themselves, are no problem. */
+ break;
+ }
+
+ /*
+ * If there's anything in baserestrictinfo that's parallel-restricted, we
+ * give up on parallelizing access to this relation. We could consider
+ * instead postponing application of the restricted quals until we're
+ * above all the parallelism in the plan tree, but it's not clear that
+ * that would be a win in very many cases, and it might be tricky to make
+ * outer join clauses work correctly. It would likely break equivalence
+ * classes, too.
+ */
+ if (!is_parallel_safe(root, (Node *) rel->baserestrictinfo))
+ return;
+
+ /*
+ * Likewise, if the relation's outputs are not parallel-safe, give up.
+ * (Usually, they're just Vars, but sometimes they're not.)
+ */
+ if (!is_parallel_safe(root, (Node *) rel->reltarget->exprs))
+ return;
+
+ /* We have a winner. */
+ rel->consider_parallel = true;
+}
+
+/*
+ * set_plain_rel_pathlist
+ * Build access paths for a plain relation (no subquery, no inheritance)
+ */
+static void
+set_plain_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
+{
+ Relids required_outer;
+
+ /*
+ * We don't support pushing join clauses into the quals of a seqscan, but
+ * it could still have required parameterization due to LATERAL refs in
+ * its tlist.
+ */
+ required_outer = rel->lateral_relids;
+
+ /* Consider sequential scan */
+ add_path(rel, create_seqscan_path(root, rel, required_outer, 0));
+
+ /* If appropriate, consider parallel sequential scan */
+ if (rel->consider_parallel && required_outer == NULL)
+ create_plain_partial_paths(root, rel);
+
+ /* Consider index scans */
+ create_index_paths(root, rel);
+
+ /* Consider TID scans */
+ create_tidscan_paths(root, rel);
+}
+
+/*
+ * create_plain_partial_paths
+ * Build partial access paths for parallel scan of a plain relation
+ */
+static void
+create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel)
+{
+ int parallel_workers;
+
+ parallel_workers = compute_parallel_worker(rel, rel->pages, -1,
+ max_parallel_workers_per_gather);
+
+ /* If any limit was set to zero, the user doesn't want a parallel scan. */
+ if (parallel_workers <= 0)
+ return;
+
+ /* Add an unordered partial path based on a parallel sequential scan. */
+ add_partial_path(rel, create_seqscan_path(root, rel, NULL, parallel_workers));
+}
+
+/*
+ * set_tablesample_rel_size
+ * Set size estimates for a sampled relation
+ */
+static void
+set_tablesample_rel_size(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
+{
+ TableSampleClause *tsc = rte->tablesample;
+ TsmRoutine *tsm;
+ BlockNumber pages;
+ double tuples;
+
+ /*
+ * Test any partial indexes of rel for applicability. We must do this
+ * first since partial unique indexes can affect size estimates.
+ */
+ check_index_predicates(root, rel);
+
+ /*
+ * Call the sampling method's estimation function to estimate the number
+ * of pages it will read and the number of tuples it will return. (Note:
+ * we assume the function returns sane values.)
+ */
+ tsm = GetTsmRoutine(tsc->tsmhandler);
+ tsm->SampleScanGetSampleSize(root, rel, tsc->args,
+ &pages, &tuples);
+
+ /*
+ * For the moment, because we will only consider a SampleScan path for the
+ * rel, it's okay to just overwrite the pages and tuples estimates for the
+ * whole relation. If we ever consider multiple path types for sampled
+ * rels, we'll need more complication.
+ */
+ rel->pages = pages;
+ rel->tuples = tuples;
+
+ /* Mark rel with estimated output rows, width, etc */
+ set_baserel_size_estimates(root, rel);
+}
+
+/*
+ * set_tablesample_rel_pathlist
+ * Build access paths for a sampled relation
+ */
+static void
+set_tablesample_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
+{
+ Relids required_outer;
+ Path *path;
+
+ /*
+ * We don't support pushing join clauses into the quals of a samplescan,
+ * but it could still have required parameterization due to LATERAL refs
+ * in its tlist or TABLESAMPLE arguments.
+ */
+ required_outer = rel->lateral_relids;
+
+ /* Consider sampled scan */
+ path = create_samplescan_path(root, rel, required_outer);
+
+ /*
+ * If the sampling method does not support repeatable scans, we must avoid
+ * plans that would scan the rel multiple times. Ideally, we'd simply
+ * avoid putting the rel on the inside of a nestloop join; but adding such
+ * a consideration to the planner seems like a great deal of complication
+ * to support an uncommon usage of second-rate sampling methods. Instead,
+ * if there is a risk that the query might perform an unsafe join, just
+ * wrap the SampleScan in a Materialize node. We can check for joins by
+ * counting the membership of all_baserels (note that this correctly
+ * counts inheritance trees as single rels). If we're inside a subquery,
+ * we can't easily check whether a join might occur in the outer query, so
+ * just assume one is possible.
+ *
+ * GetTsmRoutine is relatively expensive compared to the other tests here,
+ * so check repeatable_across_scans last, even though that's a bit odd.
+ */
+ if ((root->query_level > 1 ||
+ bms_membership(root->all_baserels) != BMS_SINGLETON) &&
+ !(GetTsmRoutine(rte->tablesample->tsmhandler)->repeatable_across_scans))
+ {
+ path = (Path *) create_material_path(rel, path);
+ }
+
+ add_path(rel, path);
+
+ /* For the moment, at least, there are no other paths to consider */
+}
+
+/*
+ * set_foreign_size
+ * Set size estimates for a foreign table RTE
+ */
+static void
+set_foreign_size(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
+{
+ /* Mark rel with estimated output rows, width, etc */
+ set_foreign_size_estimates(root, rel);
+
+ /* Let FDW adjust the size estimates, if it can */
+ rel->fdwroutine->GetForeignRelSize(root, rel, rte->relid);
+
+ /* ... but do not let it set the rows estimate to zero */
+ rel->rows = clamp_row_est(rel->rows);
+
+ /*
+ * Also, make sure rel->tuples is not insane relative to rel->rows.
+ * Notably, this ensures sanity if pg_class.reltuples contains -1 and the
+ * FDW doesn't do anything to replace that.
+ */
+ rel->tuples = Max(rel->tuples, rel->rows);
+}
+
+/*
+ * set_foreign_pathlist
+ * Build access paths for a foreign table RTE
+ */
+static void
+set_foreign_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
+{
+ /* Call the FDW's GetForeignPaths function to generate path(s) */
+ rel->fdwroutine->GetForeignPaths(root, rel, rte->relid);
+}
+
+/*
+ * set_append_rel_size
+ * Set size estimates for a simple "append relation"
+ *
+ * The passed-in rel and RTE represent the entire append relation. The
+ * relation's contents are computed by appending together the output of the
+ * individual member relations. Note that in the non-partitioned inheritance
+ * case, the first member relation is actually the same table as is mentioned
+ * in the parent RTE ... but it has a different RTE and RelOptInfo. This is
+ * a good thing because their outputs are not the same size.
+ */
+static void
+set_append_rel_size(PlannerInfo *root, RelOptInfo *rel,
+ Index rti, RangeTblEntry *rte)
+{
+ int parentRTindex = rti;
+ bool has_live_children;
+ double parent_rows;
+ double parent_size;
+ double *parent_attrsizes;
+ int nattrs;
+ ListCell *l;
+
+ /* Guard against stack overflow due to overly deep inheritance tree. */
+ check_stack_depth();
+
+ Assert(IS_SIMPLE_REL(rel));
+
+ /*
+ * If this is a partitioned baserel, set the consider_partitionwise_join
+ * flag; currently, we only consider partitionwise joins with the baserel
+ * if its targetlist doesn't contain a whole-row Var.
+ */
+ if (enable_partitionwise_join &&
+ rel->reloptkind == RELOPT_BASEREL &&
+ rte->relkind == RELKIND_PARTITIONED_TABLE &&
+ rel->attr_needed[InvalidAttrNumber - rel->min_attr] == NULL)
+ rel->consider_partitionwise_join = true;
+
+ /*
+ * Initialize to compute size estimates for whole append relation.
+ *
+ * We handle width estimates by weighting the widths of different child
+ * rels proportionally to their number of rows. This is sensible because
+ * the use of width estimates is mainly to compute the total relation
+ * "footprint" if we have to sort or hash it. To do this, we sum the
+ * total equivalent size (in "double" arithmetic) and then divide by the
+ * total rowcount estimate. This is done separately for the total rel
+ * width and each attribute.
+ *
+ * Note: if you consider changing this logic, beware that child rels could
+ * have zero rows and/or width, if they were excluded by constraints.
+ */
+ has_live_children = false;
+ parent_rows = 0;
+ parent_size = 0;
+ nattrs = rel->max_attr - rel->min_attr + 1;
+ parent_attrsizes = (double *) palloc0(nattrs * sizeof(double));
+
+ foreach(l, root->append_rel_list)
+ {
+ AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(l);
+ int childRTindex;
+ RangeTblEntry *childRTE;
+ RelOptInfo *childrel;
+ ListCell *parentvars;
+ ListCell *childvars;
+
+ /* append_rel_list contains all append rels; ignore others */
+ if (appinfo->parent_relid != parentRTindex)
+ continue;
+
+ childRTindex = appinfo->child_relid;
+ childRTE = root->simple_rte_array[childRTindex];
+
+ /*
+ * The child rel's RelOptInfo was already created during
+ * add_other_rels_to_query.
+ */
+ childrel = find_base_rel(root, childRTindex);
+ Assert(childrel->reloptkind == RELOPT_OTHER_MEMBER_REL);
+
+ /* We may have already proven the child to be dummy. */
+ if (IS_DUMMY_REL(childrel))
+ continue;
+
+ /*
+ * We have to copy the parent's targetlist and quals to the child,
+ * with appropriate substitution of variables. However, the
+ * baserestrictinfo quals were already copied/substituted when the
+ * child RelOptInfo was built. So we don't need any additional setup
+ * before applying constraint exclusion.
+ */
+ if (relation_excluded_by_constraints(root, childrel, childRTE))
+ {
+ /*
+ * This child need not be scanned, so we can omit it from the
+ * appendrel.
+ */
+ set_dummy_rel_pathlist(childrel);
+ continue;
+ }
+
+ /*
+ * Constraint exclusion failed, so copy the parent's join quals and
+ * targetlist to the child, with appropriate variable substitutions.
+ *
+ * NB: the resulting childrel->reltarget->exprs may contain arbitrary
+ * expressions, which otherwise would not occur in a rel's targetlist.
+ * Code that might be looking at an appendrel child must cope with
+ * such. (Normally, a rel's targetlist would only include Vars and
+ * PlaceHolderVars.) XXX we do not bother to update the cost or width
+ * fields of childrel->reltarget; not clear if that would be useful.
+ */
+ childrel->joininfo = (List *)
+ adjust_appendrel_attrs(root,
+ (Node *) rel->joininfo,
+ 1, &appinfo);
+ childrel->reltarget->exprs = (List *)
+ adjust_appendrel_attrs(root,
+ (Node *) rel->reltarget->exprs,
+ 1, &appinfo);
+
+ /*
+ * We have to make child entries in the EquivalenceClass data
+ * structures as well. This is needed either if the parent
+ * participates in some eclass joins (because we will want to consider
+ * inner-indexscan joins on the individual children) or if the parent
+ * has useful pathkeys (because we should try to build MergeAppend
+ * paths that produce those sort orderings).
+ */
+ if (rel->has_eclass_joins || has_useful_pathkeys(root, rel))
+ add_child_rel_equivalences(root, appinfo, rel, childrel);
+ childrel->has_eclass_joins = rel->has_eclass_joins;
+
+ /*
+ * Note: we could compute appropriate attr_needed data for the child's
+ * variables, by transforming the parent's attr_needed through the
+ * translated_vars mapping. However, currently there's no need
+ * because attr_needed is only examined for base relations not
+ * otherrels. So we just leave the child's attr_needed empty.
+ */
+
+ /*
+ * If we consider partitionwise joins with the parent rel, do the same
+ * for partitioned child rels.
+ *
+ * Note: here we abuse the consider_partitionwise_join flag by setting
+ * it for child rels that are not themselves partitioned. We do so to
+ * tell try_partitionwise_join() that the child rel is sufficiently
+ * valid to be used as a per-partition input, even if it later gets
+ * proven to be dummy. (It's not usable until we've set up the
+ * reltarget and EC entries, which we just did.)
+ */
+ if (rel->consider_partitionwise_join)
+ childrel->consider_partitionwise_join = true;
+
+ /*
+ * If parallelism is allowable for this query in general, see whether
+ * it's allowable for this childrel in particular. But if we've
+ * already decided the appendrel is not parallel-safe as a whole,
+ * there's no point in considering parallelism for this child. For
+ * consistency, do this before calling set_rel_size() for the child.
+ */
+ if (root->glob->parallelModeOK && rel->consider_parallel)
+ set_rel_consider_parallel(root, childrel, childRTE);
+
+ /*
+ * Compute the child's size.
+ */
+ set_rel_size(root, childrel, childRTindex, childRTE);
+
+ /*
+ * It is possible that constraint exclusion detected a contradiction
+ * within a child subquery, even though we didn't prove one above. If
+ * so, we can skip this child.
+ */
+ if (IS_DUMMY_REL(childrel))
+ continue;
+
+ /* We have at least one live child. */
+ has_live_children = true;
+
+ /*
+ * If any live child is not parallel-safe, treat the whole appendrel
+ * as not parallel-safe. In future we might be able to generate plans
+ * in which some children are farmed out to workers while others are
+ * not; but we don't have that today, so it's a waste to consider
+ * partial paths anywhere in the appendrel unless it's all safe.
+ * (Child rels visited before this one will be unmarked in
+ * set_append_rel_pathlist().)
+ */
+ if (!childrel->consider_parallel)
+ rel->consider_parallel = false;
+
+ /*
+ * Accumulate size information from each live child.
+ */
+ Assert(childrel->rows > 0);
+
+ parent_rows += childrel->rows;
+ parent_size += childrel->reltarget->width * childrel->rows;
+
+ /*
+ * Accumulate per-column estimates too. We need not do anything for
+ * PlaceHolderVars in the parent list. If a child expression isn't a
+ * Var, or we didn't record a width estimate for it, we have to fall
+ * back on a datatype-based estimate.
+ *
+ * By construction, child's targetlist is 1-to-1 with parent's.
+ */
+ forboth(parentvars, rel->reltarget->exprs,
+ childvars, childrel->reltarget->exprs)
+ {
+ Var *parentvar = (Var *) lfirst(parentvars);
+ Node *childvar = (Node *) lfirst(childvars);
+
+ if (IsA(parentvar, Var) && parentvar->varno == parentRTindex)
+ {
+ int pndx = parentvar->varattno - rel->min_attr;
+ int32 child_width = 0;
+
+ if (IsA(childvar, Var) &&
+ ((Var *) childvar)->varno == childrel->relid)
+ {
+ int cndx = ((Var *) childvar)->varattno - childrel->min_attr;
+
+ child_width = childrel->attr_widths[cndx];
+ }
+ if (child_width <= 0)
+ child_width = get_typavgwidth(exprType(childvar),
+ exprTypmod(childvar));
+ Assert(child_width > 0);
+ parent_attrsizes[pndx] += child_width * childrel->rows;
+ }
+ }
+ }
+
+ if (has_live_children)
+ {
+ /*
+ * Save the finished size estimates.
+ */
+ int i;
+
+ Assert(parent_rows > 0);
+ rel->rows = parent_rows;
+ rel->reltarget->width = rint(parent_size / parent_rows);
+ for (i = 0; i < nattrs; i++)
+ rel->attr_widths[i] = rint(parent_attrsizes[i] / parent_rows);
+
+ /*
+ * Set "raw tuples" count equal to "rows" for the appendrel; needed
+ * because some places assume rel->tuples is valid for any baserel.
+ */
+ rel->tuples = parent_rows;
+
+ /*
+ * Note that we leave rel->pages as zero; this is important to avoid
+ * double-counting the appendrel tree in total_table_pages.
+ */
+ }
+ else
+ {
+ /*
+ * All children were excluded by constraints, so mark the whole
+ * appendrel dummy. We must do this in this phase so that the rel's
+ * dummy-ness is visible when we generate paths for other rels.
+ */
+ set_dummy_rel_pathlist(rel);
+ }
+
+ pfree(parent_attrsizes);
+}
+
+/*
+ * set_append_rel_pathlist
+ * Build access paths for an "append relation"
+ */
+static void
+set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
+ Index rti, RangeTblEntry *rte)
+{
+ int parentRTindex = rti;
+ List *live_childrels = NIL;
+ ListCell *l;
+
+ /*
+ * Generate access paths for each member relation, and remember the
+ * non-dummy children.
+ */
+ foreach(l, root->append_rel_list)
+ {
+ AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(l);
+ int childRTindex;
+ RangeTblEntry *childRTE;
+ RelOptInfo *childrel;
+
+ /* append_rel_list contains all append rels; ignore others */
+ if (appinfo->parent_relid != parentRTindex)
+ continue;
+
+ /* Re-locate the child RTE and RelOptInfo */
+ childRTindex = appinfo->child_relid;
+ childRTE = root->simple_rte_array[childRTindex];
+ childrel = root->simple_rel_array[childRTindex];
+
+ /*
+ * If set_append_rel_size() decided the parent appendrel was
+ * parallel-unsafe at some point after visiting this child rel, we
+ * need to propagate the unsafety marking down to the child, so that
+ * we don't generate useless partial paths for it.
+ */
+ if (!rel->consider_parallel)
+ childrel->consider_parallel = false;
+
+ /*
+ * Compute the child's access paths.
+ */
+ set_rel_pathlist(root, childrel, childRTindex, childRTE);
+
+ /*
+ * If child is dummy, ignore it.
+ */
+ if (IS_DUMMY_REL(childrel))
+ continue;
+
+ /*
+ * Child is live, so add it to the live_childrels list for use below.
+ */
+ live_childrels = lappend(live_childrels, childrel);
+ }
+
+ /* Add paths to the append relation. */
+ add_paths_to_append_rel(root, rel, live_childrels);
+}
+
+
+/*
+ * add_paths_to_append_rel
+ * Generate paths for the given append relation given the set of non-dummy
+ * child rels.
+ *
+ * The function collects all parameterizations and orderings supported by the
+ * non-dummy children. For every such parameterization or ordering, it creates
+ * an append path collecting one path from each non-dummy child with given
+ * parameterization or ordering. Similarly it collects partial paths from
+ * non-dummy children to create partial append paths.
+ */
+void
+add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel,
+ List *live_childrels)
+{
+ List *subpaths = NIL;
+ bool subpaths_valid = true;
+ List *partial_subpaths = NIL;
+ List *pa_partial_subpaths = NIL;
+ List *pa_nonpartial_subpaths = NIL;
+ bool partial_subpaths_valid = true;
+ bool pa_subpaths_valid;
+ List *all_child_pathkeys = NIL;
+ List *all_child_outers = NIL;
+ ListCell *l;
+ double partial_rows = -1;
+
+ /* If appropriate, consider parallel append */
+ pa_subpaths_valid = enable_parallel_append && rel->consider_parallel;
+
+ /*
+ * For every non-dummy child, remember the cheapest path. Also, identify
+ * all pathkeys (orderings) and parameterizations (required_outer sets)
+ * available for the non-dummy member relations.
+ */
+ foreach(l, live_childrels)
+ {
+ RelOptInfo *childrel = lfirst(l);
+ ListCell *lcp;
+ Path *cheapest_partial_path = NULL;
+
+ /*
+ * If child has an unparameterized cheapest-total path, add that to
+ * the unparameterized Append path we are constructing for the parent.
+ * If not, there's no workable unparameterized path.
+ *
+ * With partitionwise aggregates, the child rel's pathlist may be
+ * empty, so don't assume that a path exists here.
+ */
+ if (childrel->pathlist != NIL &&
+ childrel->cheapest_total_path->param_info == NULL)
+ accumulate_append_subpath(childrel->cheapest_total_path,
+ &subpaths, NULL);
+ else
+ subpaths_valid = false;
+
+ /* Same idea, but for a partial plan. */
+ if (childrel->partial_pathlist != NIL)
+ {
+ cheapest_partial_path = linitial(childrel->partial_pathlist);
+ accumulate_append_subpath(cheapest_partial_path,
+ &partial_subpaths, NULL);
+ }
+ else
+ partial_subpaths_valid = false;
+
+ /*
+ * Same idea, but for a parallel append mixing partial and non-partial
+ * paths.
+ */
+ if (pa_subpaths_valid)
+ {
+ Path *nppath = NULL;
+
+ nppath =
+ get_cheapest_parallel_safe_total_inner(childrel->pathlist);
+
+ if (cheapest_partial_path == NULL && nppath == NULL)
+ {
+ /* Neither a partial nor a parallel-safe path? Forget it. */
+ pa_subpaths_valid = false;
+ }
+ else if (nppath == NULL ||
+ (cheapest_partial_path != NULL &&
+ cheapest_partial_path->total_cost < nppath->total_cost))
+ {
+ /* Partial path is cheaper or the only option. */
+ Assert(cheapest_partial_path != NULL);
+ accumulate_append_subpath(cheapest_partial_path,
+ &pa_partial_subpaths,
+ &pa_nonpartial_subpaths);
+ }
+ else
+ {
+ /*
+ * Either we've got only a non-partial path, or we think that
+ * a single backend can execute the best non-partial path
+ * faster than all the parallel backends working together can
+ * execute the best partial path.
+ *
+ * It might make sense to be more aggressive here. Even if
+ * the best non-partial path is more expensive than the best
+ * partial path, it could still be better to choose the
+ * non-partial path if there are several such paths that can
+ * be given to different workers. For now, we don't try to
+ * figure that out.
+ */
+ accumulate_append_subpath(nppath,
+ &pa_nonpartial_subpaths,
+ NULL);
+ }
+ }
+
+ /*
+ * Collect lists of all the available path orderings and
+ * parameterizations for all the children. We use these as a
+ * heuristic to indicate which sort orderings and parameterizations we
+ * should build Append and MergeAppend paths for.
+ */
+ foreach(lcp, childrel->pathlist)
+ {
+ Path *childpath = (Path *) lfirst(lcp);
+ List *childkeys = childpath->pathkeys;
+ Relids childouter = PATH_REQ_OUTER(childpath);
+
+ /* Unsorted paths don't contribute to pathkey list */
+ if (childkeys != NIL)
+ {
+ ListCell *lpk;
+ bool found = false;
+
+ /* Have we already seen this ordering? */
+ foreach(lpk, all_child_pathkeys)
+ {
+ List *existing_pathkeys = (List *) lfirst(lpk);
+
+ if (compare_pathkeys(existing_pathkeys,
+ childkeys) == PATHKEYS_EQUAL)
+ {
+ found = true;
+ break;
+ }
+ }
+ if (!found)
+ {
+ /* No, so add it to all_child_pathkeys */
+ all_child_pathkeys = lappend(all_child_pathkeys,
+ childkeys);
+ }
+ }
+
+ /* Unparameterized paths don't contribute to param-set list */
+ if (childouter)
+ {
+ ListCell *lco;
+ bool found = false;
+
+ /* Have we already seen this param set? */
+ foreach(lco, all_child_outers)
+ {
+ Relids existing_outers = (Relids) lfirst(lco);
+
+ if (bms_equal(existing_outers, childouter))
+ {
+ found = true;
+ break;
+ }
+ }
+ if (!found)
+ {
+ /* No, so add it to all_child_outers */
+ all_child_outers = lappend(all_child_outers,
+ childouter);
+ }
+ }
+ }
+ }
+
+ /*
+ * If we found unparameterized paths for all children, build an unordered,
+ * unparameterized Append path for the rel. (Note: this is correct even
+ * if we have zero or one live subpath due to constraint exclusion.)
+ */
+ if (subpaths_valid)
+ add_path(rel, (Path *) create_append_path(root, rel, subpaths, NIL,
+ NIL, NULL, 0, false,
+ -1));
+
+ /*
+ * Consider an append of unordered, unparameterized partial paths. Make
+ * it parallel-aware if possible.
+ */
+ if (partial_subpaths_valid && partial_subpaths != NIL)
+ {
+ AppendPath *appendpath;
+ ListCell *lc;
+ int parallel_workers = 0;
+
+ /* Find the highest number of workers requested for any subpath. */
+ foreach(lc, partial_subpaths)
+ {
+ Path *path = lfirst(lc);
+
+ parallel_workers = Max(parallel_workers, path->parallel_workers);
+ }
+ Assert(parallel_workers > 0);
+
+ /*
+ * If the use of parallel append is permitted, always request at least
+ * log2(# of children) workers. We assume it can be useful to have
+ * extra workers in this case because they will be spread out across
+ * the children. The precise formula is just a guess, but we don't
+ * want to end up with a radically different answer for a table with N
+ * partitions vs. an unpartitioned table with the same data, so the
+ * use of some kind of log-scaling here seems to make some sense.
+ */
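+ /*
+ * For instance (hypothetical numbers), an appendrel with 32 live
+ * children gives fls(32) = 6, so when parallel append is enabled we
+ * request at least 6 workers, subject to the
+ * max_parallel_workers_per_gather cap applied just below.
+ */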
+ if (enable_parallel_append)
+ {
+ parallel_workers = Max(parallel_workers,
+ fls(list_length(live_childrels)));
+ parallel_workers = Min(parallel_workers,
+ max_parallel_workers_per_gather);
+ }
+ Assert(parallel_workers > 0);
+
+ /* Generate a partial append path. */
+ appendpath = create_append_path(root, rel, NIL, partial_subpaths,
+ NIL, NULL, parallel_workers,
+ enable_parallel_append,
+ -1);
+
+ /*
+ * Make sure any subsequent partial paths use the same row count
+ * estimate.
+ */
+ partial_rows = appendpath->path.rows;
+
+ /* Add the path. */
+ add_partial_path(rel, (Path *) appendpath);
+ }
+
+ /*
+ * Consider a parallel-aware append using a mix of partial and non-partial
+ * paths. (This only makes sense if there's at least one child which has
+ * a non-partial path that is substantially cheaper than any partial path;
+ * otherwise, we should use the append path added in the previous step.)
+ */
+ if (pa_subpaths_valid && pa_nonpartial_subpaths != NIL)
+ {
+ AppendPath *appendpath;
+ ListCell *lc;
+ int parallel_workers = 0;
+
+ /*
+ * Find the highest number of workers requested for any partial
+ * subpath.
+ */
+ foreach(lc, pa_partial_subpaths)
+ {
+ Path *path = lfirst(lc);
+
+ parallel_workers = Max(parallel_workers, path->parallel_workers);
+ }
+
+ /*
+ * Same formula here as above. It's even more important in this
+ * instance because the non-partial paths won't contribute anything to
+ * the planned number of parallel workers.
+ */
+ parallel_workers = Max(parallel_workers,
+ fls(list_length(live_childrels)));
+ parallel_workers = Min(parallel_workers,
+ max_parallel_workers_per_gather);
+ Assert(parallel_workers > 0);
+
+ appendpath = create_append_path(root, rel, pa_nonpartial_subpaths,
+ pa_partial_subpaths,
+ NIL, NULL, parallel_workers, true,
+ partial_rows);
+ add_partial_path(rel, (Path *) appendpath);
+ }
+
+ /*
+ * Also build unparameterized ordered append paths based on the collected
+ * list of child pathkeys.
+ */
+ if (subpaths_valid)
+ generate_orderedappend_paths(root, rel, live_childrels,
+ all_child_pathkeys);
+
+ /*
+ * Build Append paths for each parameterization seen among the child rels.
+ * (This may look pretty expensive, but in most cases of practical
+ * interest, the child rels will expose mostly the same parameterizations,
+ * so that not that many cases actually get considered here.)
+ *
+ * The Append node itself cannot enforce quals, so all qual checking must
+ * be done in the child paths. This means that to have a parameterized
+ * Append path, we must have the exact same parameterization for each
+ * child path; otherwise some children might be failing to check the
+ * moved-down quals. To make them match up, we can try to increase the
+ * parameterization of lesser-parameterized paths.
+ */
+ foreach(l, all_child_outers)
+ {
+ Relids required_outer = (Relids) lfirst(l);
+ ListCell *lcr;
+
+ /* Select the child paths for an Append with this parameterization */
+ subpaths = NIL;
+ subpaths_valid = true;
+ foreach(lcr, live_childrels)
+ {
+ RelOptInfo *childrel = (RelOptInfo *) lfirst(lcr);
+ Path *subpath;
+
+ if (childrel->pathlist == NIL)
+ {
+ /* failed to make a suitable path for this child */
+ subpaths_valid = false;
+ break;
+ }
+
+ subpath = get_cheapest_parameterized_child_path(root,
+ childrel,
+ required_outer);
+ if (subpath == NULL)
+ {
+ /* failed to make a suitable path for this child */
+ subpaths_valid = false;
+ break;
+ }
+ accumulate_append_subpath(subpath, &subpaths, NULL);
+ }
+
+ if (subpaths_valid)
+ add_path(rel, (Path *)
+ create_append_path(root, rel, subpaths, NIL,
+ NIL, required_outer, 0, false,
+ -1));
+ }
+
+ /*
+ * When there is only a single child relation, the Append path can inherit
+ * any ordering available for the child rel's path, so that it's useful to
+ * consider ordered partial paths. Above we only considered the cheapest
+ * partial path for each child, but let's also make paths using any
+ * partial paths that have pathkeys.
+ */
+ if (list_length(live_childrels) == 1)
+ {
+ RelOptInfo *childrel = (RelOptInfo *) linitial(live_childrels);
+
+ /* skip the cheapest partial path, since we already used that above */
+ for_each_from(l, childrel->partial_pathlist, 1)
+ {
+ Path *path = (Path *) lfirst(l);
+ AppendPath *appendpath;
+
+ /* skip paths with no pathkeys. */
+ if (path->pathkeys == NIL)
+ continue;
+
+ appendpath = create_append_path(root, rel, NIL, list_make1(path),
+ NIL, NULL,
+ path->parallel_workers, true,
+ partial_rows);
+ add_partial_path(rel, (Path *) appendpath);
+ }
+ }
+}
+
+/*
+ * generate_orderedappend_paths
+ * Generate ordered append paths for an append relation
+ *
+ * Usually we generate MergeAppend paths here, but there are some special
+ * cases where we can generate simple Append paths, because the subpaths
+ * can provide tuples in the required order already.
+ *
+ * We generate a path for each ordering (pathkey list) appearing in
+ * all_child_pathkeys.
+ *
+ * We consider both cheapest-startup and cheapest-total cases, ie, for each
+ * interesting ordering, collect all the cheapest startup subpaths and all the
+ * cheapest total paths, and build a suitable path for each case.
+ *
+ * We don't currently generate any parameterized ordered paths here. While
+ * it would not take much more code here to do so, it's very unclear that it
+ * is worth the planning cycles to investigate such paths: there's little
+ * use for an ordered path on the inside of a nestloop. In fact, it's likely
+ * that the current coding of add_path would reject such paths out of hand,
+ * because add_path gives no credit for sort ordering of parameterized paths,
+ * and a parameterized MergeAppend is going to be more expensive than the
+ * corresponding parameterized Append path. If we ever try harder to support
+ * parameterized mergejoin plans, it might be worth adding support for
+ * parameterized paths here to feed such joins. (See notes in
+ * optimizer/README for why that might not ever happen, though.)
+ */
+static void
+generate_orderedappend_paths(PlannerInfo *root, RelOptInfo *rel,
+ List *live_childrels,
+ List *all_child_pathkeys)
+{
+ ListCell *lcp;
+ List *partition_pathkeys = NIL;
+ List *partition_pathkeys_desc = NIL;
+ bool partition_pathkeys_partial = true;
+ bool partition_pathkeys_desc_partial = true;
+
+ /*
+ * Some partitioned table setups may allow us to use an Append node
+ * instead of a MergeAppend. This is possible in cases such as RANGE
+ * partitioned tables where it's guaranteed that an earlier partition must
+ * contain rows which come earlier in the sort order. To detect whether
+ * this is relevant, build pathkey descriptions of the partition ordering,
+ * for both forward and reverse scans.
+ */
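+ /*
+ * Sketch of the idea (hypothetical schema): for a table partitioned
+ * BY RANGE (a), scanning the partitions in bound order already yields
+ * output sorted by a, so ORDER BY a can be satisfied by a plain Append
+ * over suitably ordered child paths, with no MergeAppend heap needed.
+ */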
+ if (rel->part_scheme != NULL && IS_SIMPLE_REL(rel) &&
+ partitions_are_ordered(rel->boundinfo, rel->live_parts))
+ {
+ partition_pathkeys = build_partition_pathkeys(root, rel,
+ ForwardScanDirection,
+ &partition_pathkeys_partial);
+
+ partition_pathkeys_desc = build_partition_pathkeys(root, rel,
+ BackwardScanDirection,
+ &partition_pathkeys_desc_partial);
+
+ /*
+ * You might think we should truncate_useless_pathkeys here, but
+ * allowing partition keys which are a subset of the query's pathkeys
+ * can often be useful. For example, consider a table partitioned by
+ * RANGE (a, b), and a query with ORDER BY a, b, c. If we have child
+ * paths that can produce the a, b, c ordering (perhaps via indexes on
+ * (a, b, c)) then it works to consider the appendrel output as
+ * ordered by a, b, c.
+ */
+ }
+
+ /* Now consider each interesting sort ordering */
+ foreach(lcp, all_child_pathkeys)
+ {
+ List *pathkeys = (List *) lfirst(lcp);
+ List *startup_subpaths = NIL;
+ List *total_subpaths = NIL;
+ List *fractional_subpaths = NIL;
+ bool startup_neq_total = false;
+ ListCell *lcr;
+ bool match_partition_order;
+ bool match_partition_order_desc;
+
+ /*
+ * Determine if this sort ordering matches any partition pathkeys we
+ * have, for both ascending and descending partition order. If the
+ * partition pathkeys happen to be contained in pathkeys then it still
+ * works, as described above, providing that the partition pathkeys
+ * are complete and not just a prefix of the partition keys. (In such
+ * cases we'll be relying on the child paths to have sorted the
+ * lower-order columns of the required pathkeys.)
+ */
+ match_partition_order =
+ pathkeys_contained_in(pathkeys, partition_pathkeys) ||
+ (!partition_pathkeys_partial &&
+ pathkeys_contained_in(partition_pathkeys, pathkeys));
+
+ match_partition_order_desc = !match_partition_order &&
+ (pathkeys_contained_in(pathkeys, partition_pathkeys_desc) ||
+ (!partition_pathkeys_desc_partial &&
+ pathkeys_contained_in(partition_pathkeys_desc, pathkeys)));
+
+ /* Select the child paths for this ordering... */
+ foreach(lcr, live_childrels)
+ {
+ RelOptInfo *childrel = (RelOptInfo *) lfirst(lcr);
+ Path *cheapest_startup,
+ *cheapest_total,
+ *cheapest_fractional = NULL;
+
+ /* Locate the right paths, if they are available. */
+ cheapest_startup =
+ get_cheapest_path_for_pathkeys(childrel->pathlist,
+ pathkeys,
+ NULL,
+ STARTUP_COST,
+ false);
+ cheapest_total =
+ get_cheapest_path_for_pathkeys(childrel->pathlist,
+ pathkeys,
+ NULL,
+ TOTAL_COST,
+ false);
+
+ /*
+ * If we can't find any paths with the right order just use the
+ * cheapest-total path; we'll have to sort it later.
+ */
+ if (cheapest_startup == NULL || cheapest_total == NULL)
+ {
+ cheapest_startup = cheapest_total =
+ childrel->cheapest_total_path;
+ /* Assert we do have an unparameterized path for this child */
+ Assert(cheapest_total->param_info == NULL);
+ }
+
+ /*
+ * When building a fractional path, determine a cheapest
+ * fractional path for each child relation too. Looking at startup
+ * and total costs is not enough, because the cheapest fractional
+ * path may be dominated by two separate paths (one for startup,
+ * one for total).
+ */
+ if (root->tuple_fraction > 0)
+ {
+ double path_fraction = (1.0 / root->tuple_fraction);
+
+ cheapest_fractional =
+ get_cheapest_fractional_path_for_pathkeys(childrel->pathlist,
+ pathkeys,
+ NULL,
+ path_fraction);
+
+ /*
+ * If we found no path with matching pathkeys, use the
+ * cheapest total path instead.
+ *
+ * XXX We might consider partially sorted paths too (with an
+ * incremental sort on top). But we'd have to build all the
+ * incremental paths, do the costing etc.
+ */
+ if (!cheapest_fractional)
+ cheapest_fractional = cheapest_total;
+ }
+
+ /*
+ * Notice whether we actually have different paths for the
+ * "cheapest" and "total" cases; frequently there will be no point
+ * in two create_merge_append_path() calls.
+ */
+ if (cheapest_startup != cheapest_total)
+ startup_neq_total = true;
+
+ /*
+ * Collect the appropriate child paths. The required logic varies
+ * for the Append and MergeAppend cases.
+ */
+ if (match_partition_order)
+ {
+ /*
+ * We're going to make a plain Append path. We don't need
+ * most of what accumulate_append_subpath would do, but we do
+ * want to cut out child Appends or MergeAppends if they have
+ * just a single subpath (and hence aren't doing anything
+ * useful).
+ */
+ cheapest_startup = get_singleton_append_subpath(cheapest_startup);
+ cheapest_total = get_singleton_append_subpath(cheapest_total);
+
+ startup_subpaths = lappend(startup_subpaths, cheapest_startup);
+ total_subpaths = lappend(total_subpaths, cheapest_total);
+
+ if (cheapest_fractional)
+ {
+ cheapest_fractional = get_singleton_append_subpath(cheapest_fractional);
+ fractional_subpaths = lappend(fractional_subpaths, cheapest_fractional);
+ }
+ }
+ else if (match_partition_order_desc)
+ {
+ /*
+ * As above, but we need to reverse the order of the children,
+ * because nodeAppend.c doesn't know anything about reverse
+ * ordering and will scan the children in the order presented.
+ */
+ cheapest_startup = get_singleton_append_subpath(cheapest_startup);
+ cheapest_total = get_singleton_append_subpath(cheapest_total);
+
+ startup_subpaths = lcons(cheapest_startup, startup_subpaths);
+ total_subpaths = lcons(cheapest_total, total_subpaths);
+
+ if (cheapest_fractional)
+ {
+ cheapest_fractional = get_singleton_append_subpath(cheapest_fractional);
+ fractional_subpaths = lcons(cheapest_fractional, fractional_subpaths);
+ }
+ }
+ else
+ {
+ /*
+ * Otherwise, rely on accumulate_append_subpath to collect the
+ * child paths for the MergeAppend.
+ */
+ accumulate_append_subpath(cheapest_startup,
+ &startup_subpaths, NULL);
+ accumulate_append_subpath(cheapest_total,
+ &total_subpaths, NULL);
+
+ if (cheapest_fractional)
+ accumulate_append_subpath(cheapest_fractional,
+ &fractional_subpaths, NULL);
+ }
+ }
+
+ /* ... and build the Append or MergeAppend paths */
+ if (match_partition_order || match_partition_order_desc)
+ {
+ /* We only need Append */
+ add_path(rel, (Path *) create_append_path(root,
+ rel,
+ startup_subpaths,
+ NIL,
+ pathkeys,
+ NULL,
+ 0,
+ false,
+ -1));
+ if (startup_neq_total)
+ add_path(rel, (Path *) create_append_path(root,
+ rel,
+ total_subpaths,
+ NIL,
+ pathkeys,
+ NULL,
+ 0,
+ false,
+ -1));
+
+ if (fractional_subpaths)
+ add_path(rel, (Path *) create_append_path(root,
+ rel,
+ fractional_subpaths,
+ NIL,
+ pathkeys,
+ NULL,
+ 0,
+ false,
+ -1));
+ }
+ else
+ {
+ /* We need MergeAppend */
+ add_path(rel, (Path *) create_merge_append_path(root,
+ rel,
+ startup_subpaths,
+ pathkeys,
+ NULL));
+ if (startup_neq_total)
+ add_path(rel, (Path *) create_merge_append_path(root,
+ rel,
+ total_subpaths,
+ pathkeys,
+ NULL));
+
+ if (fractional_subpaths)
+ add_path(rel, (Path *) create_merge_append_path(root,
+ rel,
+ fractional_subpaths,
+ pathkeys,
+ NULL));
+ }
+ }
+}
+
+/*
+ * get_cheapest_parameterized_child_path
+ * Get cheapest path for this relation that has exactly the requested
+ * parameterization.
+ *
+ * Returns NULL if unable to create such a path.
+ */
+static Path *
+get_cheapest_parameterized_child_path(PlannerInfo *root, RelOptInfo *rel,
+ Relids required_outer)
+{
+ Path *cheapest;
+ ListCell *lc;
+
+ /*
+ * Look up the cheapest existing path with no more than the needed
+ * parameterization. If it has exactly the needed parameterization, we're
+ * done.
+ */
+ cheapest = get_cheapest_path_for_pathkeys(rel->pathlist,
+ NIL,
+ required_outer,
+ TOTAL_COST,
+ false);
+ Assert(cheapest != NULL);
+ if (bms_equal(PATH_REQ_OUTER(cheapest), required_outer))
+ return cheapest;
+
+ /*
+ * Otherwise, we can "reparameterize" an existing path to match the given
+ * parameterization, which effectively means pushing down additional
+ * joinquals to be checked within the path's scan. However, some existing
+ * paths might check the available joinquals already while others don't;
+ * therefore, it's not clear which existing path will be cheapest after
+ * reparameterization. We have to go through them all and find out.
+ */
+ cheapest = NULL;
+ foreach(lc, rel->pathlist)
+ {
+ Path *path = (Path *) lfirst(lc);
+
+ /* Can't use it if it needs more than requested parameterization */
+ if (!bms_is_subset(PATH_REQ_OUTER(path), required_outer))
+ continue;
+
+ /*
+ * Reparameterization can only increase the path's cost, so if it's
+ * already more expensive than the current cheapest, forget it.
+ */
+ if (cheapest != NULL &&
+ compare_path_costs(cheapest, path, TOTAL_COST) <= 0)
+ continue;
+
+ /* Reparameterize if needed, then recheck cost */
+ if (!bms_equal(PATH_REQ_OUTER(path), required_outer))
+ {
+ path = reparameterize_path(root, path, required_outer, 1.0);
+ if (path == NULL)
+ continue; /* failed to reparameterize this one */
+ Assert(bms_equal(PATH_REQ_OUTER(path), required_outer));
+
+ if (cheapest != NULL &&
+ compare_path_costs(cheapest, path, TOTAL_COST) <= 0)
+ continue;
+ }
+
+ /* We have a new best path */
+ cheapest = path;
+ }
+
+ /* Return the best path, or NULL if we found no suitable candidate */
+ return cheapest;
+}
+
+/*
+ * accumulate_append_subpath
+ * Add a subpath to the list being built for an Append or MergeAppend.
+ *
+ * It's possible that the child is itself an Append or MergeAppend path, in
+ * which case we can "cut out the middleman" and just add its child paths to
+ * our own list. (We don't try to do this earlier because we need to apply
+ * both levels of transformation to the quals.)
+ *
+ * Note that if we omit a child MergeAppend in this way, we are effectively
+ * omitting a sort step, which seems fine: if the parent is to be an Append,
+ * its result would be unsorted anyway, while if the parent is to be a
+ * MergeAppend, there's no point in a separate sort on a child.
+ *
+ * Normally, either path is a partial path and subpaths is a list of partial
+ * paths, or else path is a non-partial path and subpaths is a list of those.
+ * However, if path is a parallel-aware Append, then we add its partial path
+ * children to subpaths and the rest to special_subpaths. If the latter is
+ * NULL, we don't flatten the path at all (unless it contains only partial
+ * paths).
+ */
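+ /*
+ * For illustration: if a child path is itself an Append over subpaths
+ * [A, B], accumulating it alongside another child C produces the flat
+ * subpath list [A, B, C] rather than nesting one Append inside another.
+ */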
+static void
+accumulate_append_subpath(Path *path, List **subpaths, List **special_subpaths)
+{
+ if (IsA(path, AppendPath))
+ {
+ AppendPath *apath = (AppendPath *) path;
+
+ if (!apath->path.parallel_aware || apath->first_partial_path == 0)
+ {
+ *subpaths = list_concat(*subpaths, apath->subpaths);
+ return;
+ }
+ else if (special_subpaths != NULL)
+ {
+ List *new_special_subpaths;
+
+ /* Split Parallel Append into partial and non-partial subpaths */
+ *subpaths = list_concat(*subpaths,
+ list_copy_tail(apath->subpaths,
+ apath->first_partial_path));
+ new_special_subpaths =
+ list_truncate(list_copy(apath->subpaths),
+ apath->first_partial_path);
+ *special_subpaths = list_concat(*special_subpaths,
+ new_special_subpaths);
+ return;
+ }
+ }
+ else if (IsA(path, MergeAppendPath))
+ {
+ MergeAppendPath *mpath = (MergeAppendPath *) path;
+
+ *subpaths = list_concat(*subpaths, mpath->subpaths);
+ return;
+ }
+
+ *subpaths = lappend(*subpaths, path);
+}
+
+/*
+ * get_singleton_append_subpath
+ * Returns the single subpath of an Append/MergeAppend, or just
+ * returns 'path' if it's not a single sub-path Append/MergeAppend.
+ *
+ * Note: 'path' must not be a parallel-aware path.
+ */
+static Path *
+get_singleton_append_subpath(Path *path)
+{
+ Assert(!path->parallel_aware);
+
+ if (IsA(path, AppendPath))
+ {
+ AppendPath *apath = (AppendPath *) path;
+
+ if (list_length(apath->subpaths) == 1)
+ return (Path *) linitial(apath->subpaths);
+ }
+ else if (IsA(path, MergeAppendPath))
+ {
+ MergeAppendPath *mpath = (MergeAppendPath *) path;
+
+ if (list_length(mpath->subpaths) == 1)
+ return (Path *) linitial(mpath->subpaths);
+ }
+
+ return path;
+}
+
+/*
+ * set_dummy_rel_pathlist
+ * Build a dummy path for a relation that's been excluded by constraints
+ *
+ * Rather than inventing a special "dummy" path type, we represent this as an
+ * AppendPath with no members (see also IS_DUMMY_APPEND/IS_DUMMY_REL macros).
+ *
+ * (See also mark_dummy_rel, which does basically the same thing, but is
+ * typically used to change a rel into dummy state after we already made
+ * paths for it.)
+ */
+static void
+set_dummy_rel_pathlist(RelOptInfo *rel)
+{
+ /* Set dummy size estimates --- we leave attr_widths[] as zeroes */
+ rel->rows = 0;
+ rel->reltarget->width = 0;
+
+ /* Discard any pre-existing paths; no further need for them */
+ rel->pathlist = NIL;
+ rel->partial_pathlist = NIL;
+
+ /* Set up the dummy path */
+ add_path(rel, (Path *) create_append_path(NULL, rel, NIL, NIL,
+ NIL, rel->lateral_relids,
+ 0, false, -1));
+
+ /*
+ * We set the cheapest-path fields immediately, just in case they were
+ * pointing at some discarded path. This is redundant when we're called
+ * from set_rel_size(), but not when called from elsewhere, and doing it
+ * twice is harmless anyway.
+ */
+ set_cheapest(rel);
+}
+
+/* quick-and-dirty test to see if any joining is needed */
+static bool
+has_multiple_baserels(PlannerInfo *root)
+{
+ int num_base_rels = 0;
+ Index rti;
+
+ for (rti = 1; rti < root->simple_rel_array_size; rti++)
+ {
+ RelOptInfo *brel = root->simple_rel_array[rti];
+
+ if (brel == NULL)
+ continue;
+
+ /* ignore RTEs that are "other rels" */
+ if (brel->reloptkind == RELOPT_BASEREL)
+ if (++num_base_rels > 1)
+ return true;
+ }
+ return false;
+}
+
+/*
+ * find_window_run_conditions
+ * Determine if 'wfunc' is really a WindowFunc and call its prosupport
+ * function to determine the function's monotonic properties. We then
+ * see if 'opexpr' can be used to short-circuit execution.
+ *
+ * For example row_number() over (order by ...) always produces a value one
+ * higher than the previous. If someone has a window function in a subquery
+ * and has a WHERE clause in the outer query to filter rows <= 10, then we may
+ * as well stop processing the WindowAgg once the row number reaches 11. Here
+ * we check if 'opexpr' might help us to stop doing needless extra processing
+ * in WindowAgg nodes.
+ *
+ * '*keep_original' is set to true if the caller should also use 'opexpr' for
+ * its original purpose. This is set to false if the caller can assume that
+ * the run condition will handle all of the required filtering.
+ *
+ * Returns true if 'opexpr' was found to be useful and was added to the
+ * WindowClause's runCondition. We also set *keep_original accordingly and add
+ * 'attno' to *run_cond_attrs offset by FirstLowInvalidHeapAttributeNumber.
+ * If the 'opexpr' cannot be used then we set *keep_original to true and
+ * return false.
+ */
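+ /*
+ * A sketch of the effect (hypothetical query): given
+ *   SELECT * FROM (SELECT x, row_number() OVER (ORDER BY x) rn FROM t) s
+ *   WHERE rn <= 10;
+ * the qual "rn <= 10" can become a runCondition of the WindowClause,
+ * letting the WindowAgg stop producing rows once row_number() passes 10.
+ */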
+static bool
+find_window_run_conditions(Query *subquery, RangeTblEntry *rte, Index rti,
+ AttrNumber attno, WindowFunc *wfunc, OpExpr *opexpr,
+ bool wfunc_left, bool *keep_original,
+ Bitmapset **run_cond_attrs)
+{
+ Oid prosupport;
+ Expr *otherexpr;
+ SupportRequestWFuncMonotonic req;
+ SupportRequestWFuncMonotonic *res;
+ WindowClause *wclause;
+ List *opinfos;
+ OpExpr *runopexpr;
+ Oid runoperator;
+ ListCell *lc;
+
+ *keep_original = true;
+
+ while (IsA(wfunc, RelabelType))
+ wfunc = (WindowFunc *) ((RelabelType *) wfunc)->arg;
+
+ /* we can only work with window functions */
+ if (!IsA(wfunc, WindowFunc))
+ return false;
+
+ /* can't use it if there are subplans in the WindowFunc */
+ if (contain_subplans((Node *) wfunc))
+ return false;
+
+ prosupport = get_func_support(wfunc->winfnoid);
+
+ /* Check if there's a support function for 'wfunc' */
+ if (!OidIsValid(prosupport))
+ return false;
+
+ /* get the Expr from the other side of the OpExpr */
+ if (wfunc_left)
+ otherexpr = lsecond(opexpr->args);
+ else
+ otherexpr = linitial(opexpr->args);
+
+ /*
+ * The value being compared must not change during the evaluation of the
+ * window partition.
+ */
+ if (!is_pseudo_constant_clause((Node *) otherexpr))
+ return false;
+
+ /* find the window clause belonging to the window function */
+ wclause = (WindowClause *) list_nth(subquery->windowClause,
+ wfunc->winref - 1);
+
+ req.type = T_SupportRequestWFuncMonotonic;
+ req.window_func = wfunc;
+ req.window_clause = wclause;
+
+ /* call the support function */
+ res = (SupportRequestWFuncMonotonic *)
+ DatumGetPointer(OidFunctionCall1(prosupport,
+ PointerGetDatum(&req)));
+
+ /*
+ * Nothing to do if the function is neither monotonically increasing nor
+ * monotonically decreasing.
+ */
+ if (res == NULL || res->monotonic == MONOTONICFUNC_NONE)
+ return false;
+
+ runopexpr = NULL;
+ runoperator = InvalidOid;
+ opinfos = get_op_btree_interpretation(opexpr->opno);
+
+ foreach(lc, opinfos)
+ {
+ OpBtreeInterpretation *opinfo = (OpBtreeInterpretation *) lfirst(lc);
+ int strategy = opinfo->strategy;
+
+ /* handle < / <= */
+ if (strategy == BTLessStrategyNumber ||
+ strategy == BTLessEqualStrategyNumber)
+ {
+ /*
+ * < / <= is supported for monotonically increasing functions in
+ * the form <wfunc> op <pseudoconst> and <pseudoconst> op <wfunc>
+ * for monotonically decreasing functions.
+ */
+ if ((wfunc_left && (res->monotonic & MONOTONICFUNC_INCREASING)) ||
+ (!wfunc_left && (res->monotonic & MONOTONICFUNC_DECREASING)))
+ {
+ *keep_original = false;
+ runopexpr = opexpr;
+ runoperator = opexpr->opno;
+ }
+ break;
+ }
+ /* handle > / >= */
+ else if (strategy == BTGreaterStrategyNumber ||
+ strategy == BTGreaterEqualStrategyNumber)
+ {
+ /*
+ * > / >= is supported for monotonically decreasing functions in
+ * the form <wfunc> op <pseudoconst> and <pseudoconst> op <wfunc>
+ * for monotonically increasing functions.
+ */
+ if ((wfunc_left && (res->monotonic & MONOTONICFUNC_DECREASING)) ||
+ (!wfunc_left && (res->monotonic & MONOTONICFUNC_INCREASING)))
+ {
+ *keep_original = false;
+ runopexpr = opexpr;
+ runoperator = opexpr->opno;
+ }
+ break;
+ }
+ /* handle = */
+ else if (strategy == BTEqualStrategyNumber)
+ {
+ int16 newstrategy;
+
+ /*
+ * When both monotonically increasing and decreasing then the
+ * return value of the window function will be the same each time.
+ * We can simply use 'opexpr' as the run condition without
+ * modifying it.
+ */
+ if ((res->monotonic & MONOTONICFUNC_BOTH) == MONOTONICFUNC_BOTH)
+ {
+ *keep_original = false;
+ runopexpr = opexpr;
+ runoperator = opexpr->opno;
+ break;
+ }
+
+ /*
+ * When monotonically increasing we make a qual with <wfunc> <=
+ * <value> or <value> >= <wfunc> in order to filter out values
+ * which are above the value in the equality condition. For
+ * monotonically decreasing functions we want to filter values
+ * below the value in the equality condition.
+ */
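+ /*
+ * E.g. (hypothetical) for "rn = 10" with a monotonically increasing
+ * row_number() on the left, the run condition built below is "rn <= 10",
+ * while the original equality stays behind as a qual in the upper query.
+ */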
+ if (res->monotonic & MONOTONICFUNC_INCREASING)
+ newstrategy = wfunc_left ? BTLessEqualStrategyNumber : BTGreaterEqualStrategyNumber;
+ else
+ newstrategy = wfunc_left ? BTGreaterEqualStrategyNumber : BTLessEqualStrategyNumber;
+
+ /* We must keep the original equality qual */
+ *keep_original = true;
+ runopexpr = opexpr;
+
+ /* determine the operator to use for the runCondition qual */
+ runoperator = get_opfamily_member(opinfo->opfamily_id,
+ opinfo->oplefttype,
+ opinfo->oprighttype,
+ newstrategy);
+ break;
+ }
+ }
+
+ if (runopexpr != NULL)
+ {
+ Expr *newexpr;
+
+ /*
+ * Build the qual required for the run condition keeping the
+ * WindowFunc on the same side as it was originally.
+ */
+ if (wfunc_left)
+ newexpr = make_opclause(runoperator,
+ runopexpr->opresulttype,
+ runopexpr->opretset, (Expr *) wfunc,
+ otherexpr, runopexpr->opcollid,
+ runopexpr->inputcollid);
+ else
+ newexpr = make_opclause(runoperator,
+ runopexpr->opresulttype,
+ runopexpr->opretset,
+ otherexpr, (Expr *) wfunc,
+ runopexpr->opcollid,
+ runopexpr->inputcollid);
+
+ wclause->runCondition = lappend(wclause->runCondition, newexpr);
+
+ /* record that this attno was used in a run condition */
+ *run_cond_attrs = bms_add_member(*run_cond_attrs,
+ attno - FirstLowInvalidHeapAttributeNumber);
+ return true;
+ }
+
+ /* unsupported OpExpr */
+ return false;
+}
+
+/*
+ * check_and_push_window_quals
+ * Check if 'clause' is a qual that can be pushed into a WindowFunc's
+ * WindowClause as a 'runCondition' qual. These, when present, allow
+ * some unnecessary work to be skipped during execution.
+ *
+ * 'run_cond_attrs' will be populated with all targetlist resnos of subquery
+ * targets (offset by FirstLowInvalidHeapAttributeNumber) that we pushed
+ * window quals for.
+ *
+ * Returns true if the caller still must keep the original qual or false if
+ * the caller can safely ignore the original qual because the WindowAgg node
+ * will use the runCondition to stop returning tuples.
+ */
+static bool
+check_and_push_window_quals(Query *subquery, RangeTblEntry *rte, Index rti,
+ Node *clause, Bitmapset **run_cond_attrs)
+{
+ OpExpr *opexpr = (OpExpr *) clause;
+ bool keep_original = true;
+ Var *var1;
+ Var *var2;
+
+ /* We're only able to use OpExprs with 2 operands */
+ if (!IsA(opexpr, OpExpr))
+ return true;
+
+ if (list_length(opexpr->args) != 2)
+ return true;
+
+ /*
+ * Currently, we restrict this optimization to strict OpExprs. The reason
+ * for this is that during execution, once the runcondition becomes false,
+ * we stop evaluating WindowFuncs. To avoid leaving around stale window
+ * function result values, we set them to NULL. Having only strict
+ * OpExprs here ensures that we properly filter out the tuples with NULLs
+ * in the top-level WindowAgg.
+ */
+ set_opfuncid(opexpr);
+ if (!func_strict(opexpr->opfuncid))
+ return true;
+
+ /*
+ * Check for plain Vars that reference window functions in the subquery.
+ * If we find any, we'll ask find_window_run_conditions() if 'opexpr' can
+ * be used as part of the run condition.
+ */
+
+ /* Check the left side of the OpExpr */
+ var1 = linitial(opexpr->args);
+ if (IsA(var1, Var) && var1->varattno > 0)
+ {
+ TargetEntry *tle = list_nth(subquery->targetList, var1->varattno - 1);
+ WindowFunc *wfunc = (WindowFunc *) tle->expr;
+
+ if (find_window_run_conditions(subquery, rte, rti, tle->resno, wfunc,
+ opexpr, true, &keep_original,
+ run_cond_attrs))
+ return keep_original;
+ }
+
+ /* and check the right side */
+ var2 = lsecond(opexpr->args);
+ if (IsA(var2, Var) && var2->varattno > 0)
+ {
+ TargetEntry *tle = list_nth(subquery->targetList, var2->varattno - 1);
+ WindowFunc *wfunc = (WindowFunc *) tle->expr;
+
+ if (find_window_run_conditions(subquery, rte, rti, tle->resno, wfunc,
+ opexpr, false, &keep_original,
+ run_cond_attrs))
+ return keep_original;
+ }
+
+ return true;
+}
+
+/*
+ * set_subquery_pathlist
+ * Generate SubqueryScan access paths for a subquery RTE
+ *
+ * We don't currently support generating parameterized paths for subqueries
+ * by pushing join clauses down into them; it seems too expensive to re-plan
+ * the subquery multiple times to consider different alternatives.
+ * (XXX that could stand to be reconsidered, now that we use Paths.)
+ * So the paths made here will be parameterized if the subquery contains
+ * LATERAL references, otherwise not. As long as that's true, there's no need
+ * for a separate set_subquery_size phase: just make the paths right away.
+ */
+static void
+set_subquery_pathlist(PlannerInfo *root, RelOptInfo *rel,
+ Index rti, RangeTblEntry *rte)
+{
+ Query *parse = root->parse;
+ Query *subquery = rte->subquery;
+ Relids required_outer;
+ pushdown_safety_info safetyInfo;
+ double tuple_fraction;
+ RelOptInfo *sub_final_rel;
+ Bitmapset *run_cond_attrs = NULL;
+ ListCell *lc;
+
+ /*
+ * Must copy the Query so that planning doesn't mess up the RTE contents
+ * (really really need to fix the planner to not scribble on its input,
+ * someday ... but see remove_unused_subquery_outputs to start with).
+ */
+ subquery = copyObject(subquery);
+
+ /*
+ * If it's a LATERAL subquery, it might contain some Vars of the current
+ * query level, requiring it to be treated as parameterized, even though
+ * we don't support pushing down join quals into subqueries.
+ */
+ required_outer = rel->lateral_relids;
+
+ /*
+ * Zero out result area for subquery_is_pushdown_safe, so that it can set
+ * flags as needed while recursing. In particular, we need a workspace
+ * for keeping track of the reasons why columns are unsafe to reference.
+ * These reasons are stored in the bits inside unsafeFlags[i] when we
+ * discover reasons that column i of the subquery is unsafe to be used in
+ * a pushed-down qual.
+ */
+ memset(&safetyInfo, 0, sizeof(safetyInfo));
+ safetyInfo.unsafeFlags = (unsigned char *)
+ palloc0((list_length(subquery->targetList) + 1) * sizeof(unsigned char));
+
+ /*
+ * If the subquery has the "security_barrier" flag, it means the subquery
+ * originated from a view that must enforce row-level security. Then we
+ * must not push down quals that contain leaky functions. (Ideally this
+ * would be checked inside subquery_is_pushdown_safe, but since we don't
+ * currently pass the RTE to that function, we must do it here.)
+ */
+ safetyInfo.unsafeLeaky = rte->security_barrier;
+
+ /*
+ * If there are any restriction clauses that have been attached to the
+ * subquery relation, consider pushing them down to become WHERE or HAVING
+ * quals of the subquery itself. This transformation is useful because it
+ * may allow us to generate a better plan for the subquery than evaluating
+ * all the subquery output rows and then filtering them.
+ *
+ * There are several cases where we cannot push down clauses. Restrictions
+ * involving the subquery are checked by subquery_is_pushdown_safe().
+ * Restrictions on individual clauses are checked by
+ * qual_is_pushdown_safe(). Also, we don't want to push down
+ * pseudoconstant clauses; better to have the gating node above the
+ * subquery.
+ *
+ * Non-pushed-down clauses will get evaluated as qpquals of the
+ * SubqueryScan node.
+ *
+ * XXX Are there any cases where we want to make a policy decision not to
+ * push down a pushable qual, because it'd result in a worse plan?
+ */
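+ /*
+ * For illustration (hypothetical query): in
+ *   SELECT * FROM (SELECT a, b FROM t WHERE b > 0) s WHERE s.a = 42;
+ * the outer qual "s.a = 42" is safe to push into the subquery's WHERE
+ * clause, which may let the subquery use an index on t.a instead of
+ * filtering every SubqueryScan output row.
+ */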
+ if (rel->baserestrictinfo != NIL &&
+ subquery_is_pushdown_safe(subquery, subquery, &safetyInfo))
+ {
+ /* OK to consider pushing down individual quals */
+ List *upperrestrictlist = NIL;
+ ListCell *l;
+
+ foreach(l, rel->baserestrictinfo)
+ {
+ RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
+ Node *clause = (Node *) rinfo->clause;
+
+ if (rinfo->pseudoconstant)
+ {
+ upperrestrictlist = lappend(upperrestrictlist, rinfo);
+ continue;
+ }
+
+ switch (qual_is_pushdown_safe(subquery, rti, rinfo, &safetyInfo))
+ {
+ case PUSHDOWN_SAFE:
+ /* Push it down */
+ subquery_push_qual(subquery, rte, rti, clause);
+ break;
+
+ case PUSHDOWN_WINDOWCLAUSE_RUNCOND:
+
+ /*
+ * Since we can't push the qual down into the subquery,
+ * check if it happens to reference a window function. If
+ * so then it might be useful to use for the WindowAgg's
+ * runCondition.
+ */
+ if (!subquery->hasWindowFuncs ||
+ check_and_push_window_quals(subquery, rte, rti, clause,
+ &run_cond_attrs))
+ {
+ /*
+ * The subquery has no window functions, or the clause is not
+ * a suitable window run condition qual, or it is but the
+ * original must also be kept in the upper query.
+ */
+ upperrestrictlist = lappend(upperrestrictlist, rinfo);
+ }
+ break;
+
+ case PUSHDOWN_UNSAFE:
+ upperrestrictlist = lappend(upperrestrictlist, rinfo);
+ break;
+ }
+ }
+ rel->baserestrictinfo = upperrestrictlist;
+ /* We don't bother recomputing baserestrict_min_security */
+ }
+
+ pfree(safetyInfo.unsafeFlags);
+
+ /*
+ * The upper query might not use all the subquery's output columns; if
+ * not, we can simplify. Pass the attributes that were pushed down into
+ * WindowAgg run conditions to ensure we don't accidentally think those
+ * are unused.
+ */
+ remove_unused_subquery_outputs(subquery, rel, run_cond_attrs);
+
+ /*
+ * We can safely pass the outer tuple_fraction down to the subquery if the
+ * outer level has no joining, aggregation, or sorting to do. Otherwise
+ * we'd better tell the subquery to plan for full retrieval. (XXX This
+ * could probably be made more intelligent ...)
+ */
+ if (parse->hasAggs ||
+ parse->groupClause ||
+ parse->groupingSets ||
+ parse->havingQual ||
+ parse->distinctClause ||
+ parse->sortClause ||
+ has_multiple_baserels(root))
+ tuple_fraction = 0.0; /* default case */
+ else
+ tuple_fraction = root->tuple_fraction;
+
+ /* plan_params should not be in use in current query level */
+ Assert(root->plan_params == NIL);
+
+ /* Generate a subroot and Paths for the subquery */
+ rel->subroot = subquery_planner(root->glob, subquery,
+ root,
+ false, tuple_fraction);
+
+ /* Isolate the params needed by this specific subplan */
+ rel->subplan_params = root->plan_params;
+ root->plan_params = NIL;
+
+ /*
+ * It's possible that constraint exclusion proved the subquery empty. If
+ * so, it's desirable to produce an unadorned dummy path so that we will
+ * recognize appropriate optimizations at this query level.
+ */
+ sub_final_rel = fetch_upper_rel(rel->subroot, UPPERREL_FINAL, NULL);
+
+ if (IS_DUMMY_REL(sub_final_rel))
+ {
+ set_dummy_rel_pathlist(rel);
+ return;
+ }
+
+ /*
+ * Mark rel with estimated output rows, width, etc. Note that we have to
+ * do this before generating outer-query paths, else cost_subqueryscan is
+ * not happy.
+ */
+ set_subquery_size_estimates(root, rel);
+
+ /*
+ * For each Path that subquery_planner produced, make a SubqueryScanPath
+ * in the outer query.
+ */
+ foreach(lc, sub_final_rel->pathlist)
+ {
+ Path *subpath = (Path *) lfirst(lc);
+ List *pathkeys;
+
+ /* Convert subpath's pathkeys to outer representation */
+ pathkeys = convert_subquery_pathkeys(root,
+ rel,
+ subpath->pathkeys,
+ make_tlist_from_pathtarget(subpath->pathtarget));
+
+ /* Generate outer path using this subpath */
+ add_path(rel, (Path *)
+ create_subqueryscan_path(root, rel, subpath,
+ pathkeys, required_outer));
+ }
+
+ /* If outer rel allows parallelism, do same for partial paths. */
+ if (rel->consider_parallel && bms_is_empty(required_outer))
+ {
+ /* If consider_parallel is false, there should be no partial paths. */
+ Assert(sub_final_rel->consider_parallel ||
+ sub_final_rel->partial_pathlist == NIL);
+
+ /* Same for partial paths. */
+ foreach(lc, sub_final_rel->partial_pathlist)
+ {
+ Path *subpath = (Path *) lfirst(lc);
+ List *pathkeys;
+
+ /* Convert subpath's pathkeys to outer representation */
+ pathkeys = convert_subquery_pathkeys(root,
+ rel,
+ subpath->pathkeys,
+ make_tlist_from_pathtarget(subpath->pathtarget));
+
+ /* Generate outer path using this subpath */
+ add_partial_path(rel, (Path *)
+ create_subqueryscan_path(root, rel, subpath,
+ pathkeys,
+ required_outer));
+ }
+ }
+}
+
+/*
+ * set_function_pathlist
+ * Build the (single) access path for a function RTE
+ */
+static void
+set_function_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
+{
+ Relids required_outer;
+ List *pathkeys = NIL;
+
+ /*
+ * We don't support pushing join clauses into the quals of a function
+ * scan, but it could still have required parameterization due to LATERAL
+ * refs in the function expression.
+ */
+ required_outer = rel->lateral_relids;
+
+ /*
+ * The result is considered unordered unless ORDINALITY was used, in which
+ * case it is ordered by the ordinal column (the last one). See if we
+ * care, by checking for uses of that Var in equivalence classes.
+ */
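+ /*
+ * For example (hypothetical query), in
+ *   SELECT * FROM unnest(arr) WITH ORDINALITY AS u(x, ord) ORDER BY ord;
+ * the ordinal column's inherent ordering can satisfy the ORDER BY, so the
+ * pathkeys built below may let the planner skip an explicit sort.
+ */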
+ if (rte->funcordinality)
+ {
+ AttrNumber ordattno = rel->max_attr;
+ Var *var = NULL;
+ ListCell *lc;
+
+ /*
+ * Is there a Var for it in rel's targetlist? If not, the query did
+ * not reference the ordinality column, or at least not in any way
+ * that would be interesting for sorting.
+ */
+ foreach(lc, rel->reltarget->exprs)
+ {
+ Var *node = (Var *) lfirst(lc);
+
+ /* checking varno/varlevelsup is just paranoia */
+ if (IsA(node, Var) &&
+ node->varattno == ordattno &&
+ node->varno == rel->relid &&
+ node->varlevelsup == 0)
+ {
+ var = node;
+ break;
+ }
+ }
+
+ /*
+ * Try to build pathkeys for this Var with int8 sorting. We tell
+ * build_expression_pathkey not to build any new equivalence class; if
+ * the Var isn't already mentioned in some EC, it means that nothing
+ * cares about the ordering.
+ */
+ if (var)
+ pathkeys = build_expression_pathkey(root,
+ (Expr *) var,
+ NULL, /* below outer joins */
+ Int8LessOperator,
+ rel->relids,
+ false);
+ }
+
+ /* Generate appropriate path */
+ add_path(rel, create_functionscan_path(root, rel,
+ pathkeys, required_outer));
+}
+
+/*
+ * set_values_pathlist
+ * Build the (single) access path for a VALUES RTE
+ */
+static void
+set_values_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
+{
+ Relids required_outer;
+
+ /*
+ * We don't support pushing join clauses into the quals of a values scan,
+ * but it could still have required parameterization due to LATERAL refs
+ * in the values expressions.
+ */
+ required_outer = rel->lateral_relids;
+
+ /* Generate appropriate path */
+ add_path(rel, create_valuesscan_path(root, rel, required_outer));
+}
+
+/*
+ * set_tablefunc_pathlist
+ * Build the (single) access path for a table func RTE
+ */
+static void
+set_tablefunc_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
+{
+ Relids required_outer;
+
+ /*
+ * We don't support pushing join clauses into the quals of a tablefunc
+ * scan, but it could still have required parameterization due to LATERAL
+ * refs in the function expression.
+ */
+ required_outer = rel->lateral_relids;
+
+ /* Generate appropriate path */
+ add_path(rel, create_tablefuncscan_path(root, rel,
+ required_outer));
+}
+
+/*
+ * set_cte_pathlist
+ * Build the (single) access path for a non-self-reference CTE RTE
+ *
+ * There's no need for a separate set_cte_size phase, since we don't
+ * support join-qual-parameterized paths for CTEs.
+ */
+static void
+set_cte_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
+{
+ Plan *cteplan;
+ PlannerInfo *cteroot;
+ Index levelsup;
+ int ndx;
+ ListCell *lc;
+ int plan_id;
+ Relids required_outer;
+
+ /*
+ * Find the referenced CTE, and locate the plan previously made for it.
+ */
+ levelsup = rte->ctelevelsup;
+ cteroot = root;
+ while (levelsup-- > 0)
+ {
+ cteroot = cteroot->parent_root;
+ if (!cteroot) /* shouldn't happen */
+ elog(ERROR, "bad levelsup for CTE \"%s\"", rte->ctename);
+ }
+
+ /*
+ * Note: cte_plan_ids can be shorter than cteList, if we are still working
+ * on planning the CTEs (ie, this is a side-reference from another CTE).
+ * So we mustn't use forboth here.
+ */
+ ndx = 0;
+ foreach(lc, cteroot->parse->cteList)
+ {
+ CommonTableExpr *cte = (CommonTableExpr *) lfirst(lc);
+
+ if (strcmp(cte->ctename, rte->ctename) == 0)
+ break;
+ ndx++;
+ }
+ if (lc == NULL) /* shouldn't happen */
+ elog(ERROR, "could not find CTE \"%s\"", rte->ctename);
+ if (ndx >= list_length(cteroot->cte_plan_ids))
+ elog(ERROR, "could not find plan for CTE \"%s\"", rte->ctename);
+ plan_id = list_nth_int(cteroot->cte_plan_ids, ndx);
+ if (plan_id <= 0)
+ elog(ERROR, "no plan was made for CTE \"%s\"", rte->ctename);
+ cteplan = (Plan *) list_nth(root->glob->subplans, plan_id - 1);
+
+ /* Mark rel with estimated output rows, width, etc */
+ set_cte_size_estimates(root, rel, cteplan->plan_rows);
+
+ /*
+ * We don't support pushing join clauses into the quals of a CTE scan, but
+ * it could still have required parameterization due to LATERAL refs in
+ * its tlist.
+ */
+ required_outer = rel->lateral_relids;
+
+ /* Generate appropriate path */
+ add_path(rel, create_ctescan_path(root, rel, required_outer));
+}
+
+/*
+ * set_namedtuplestore_pathlist
+ * Build the (single) access path for a named tuplestore RTE
+ *
+ * There's no need for a separate set_namedtuplestore_size phase, since we
+ * don't support join-qual-parameterized paths for tuplestores.
+ */
+static void
+set_namedtuplestore_pathlist(PlannerInfo *root, RelOptInfo *rel,
+ RangeTblEntry *rte)
+{
+ Relids required_outer;
+
+ /* Mark rel with estimated output rows, width, etc */
+ set_namedtuplestore_size_estimates(root, rel);
+
+ /*
+ * We don't support pushing join clauses into the quals of a tuplestore
+ * scan, but it could still have required parameterization due to LATERAL
+ * refs in its tlist.
+ */
+ required_outer = rel->lateral_relids;
+
+ /* Generate appropriate path */
+ add_path(rel, create_namedtuplestorescan_path(root, rel, required_outer));
+
+ /* Select cheapest path (pretty easy in this case...) */
+ set_cheapest(rel);
+}
+
+/*
+ * set_result_pathlist
+ * Build the (single) access path for an RTE_RESULT RTE
+ *
+ * There's no need for a separate set_result_size phase, since we
+ * don't support join-qual-parameterized paths for these RTEs.
+ */
+static void
+set_result_pathlist(PlannerInfo *root, RelOptInfo *rel,
+ RangeTblEntry *rte)
+{
+ Relids required_outer;
+
+ /* Mark rel with estimated output rows, width, etc */
+ set_result_size_estimates(root, rel);
+
+ /*
+ * We don't support pushing join clauses into the quals of a Result scan,
+ * but it could still have required parameterization due to LATERAL refs
+ * in its tlist.
+ */
+ required_outer = rel->lateral_relids;
+
+ /* Generate appropriate path */
+ add_path(rel, create_resultscan_path(root, rel, required_outer));
+
+ /* Select cheapest path (pretty easy in this case...) */
+ set_cheapest(rel);
+}
+
+/*
+ * set_worktable_pathlist
+ * Build the (single) access path for a self-reference CTE RTE
+ *
+ * There's no need for a separate set_worktable_size phase, since we don't
+ * support join-qual-parameterized paths for CTEs.
+ */
+static void
+set_worktable_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
+{
+ Path *ctepath;
+ PlannerInfo *cteroot;
+ Index levelsup;
+ Relids required_outer;
+
+ /*
+ * We need to find the non-recursive term's path, which is in the plan
+ * level that's processing the recursive UNION, which is one level *below*
+ * where the CTE comes from.
+ */
+ levelsup = rte->ctelevelsup;
+ if (levelsup == 0) /* shouldn't happen */
+ elog(ERROR, "bad levelsup for CTE \"%s\"", rte->ctename);
+ levelsup--;
+ cteroot = root;
+ while (levelsup-- > 0)
+ {
+ cteroot = cteroot->parent_root;
+ if (!cteroot) /* shouldn't happen */
+ elog(ERROR, "bad levelsup for CTE \"%s\"", rte->ctename);
+ }
+ ctepath = cteroot->non_recursive_path;
+ if (!ctepath) /* shouldn't happen */
+ elog(ERROR, "could not find path for CTE \"%s\"", rte->ctename);
+
+ /* Mark rel with estimated output rows, width, etc */
+ set_cte_size_estimates(root, rel, ctepath->rows);
+
+ /*
+ * We don't support pushing join clauses into the quals of a worktable
+ * scan, but it could still have required parameterization due to LATERAL
+ * refs in its tlist. (I'm not sure this is actually possible given the
+ * restrictions on recursive references, but it's easy enough to support.)
+ */
+ required_outer = rel->lateral_relids;
+
+ /* Generate appropriate path */
+ add_path(rel, create_worktablescan_path(root, rel, required_outer));
+}
+
+/*
+ * generate_gather_paths
+ * Generate parallel access paths for a relation by pushing a Gather or
+ * Gather Merge on top of a partial path.
+ *
+ * This must not be called until after we're done creating all partial paths
+ * for the specified relation. (Otherwise, add_partial_path might delete a
+ * path that some GatherPath or GatherMergePath has a reference to.)
+ *
+ * If we're generating paths for a scan or join relation, override_rows will
+ * be false, and we'll just use the relation's size estimate. When we're
+ * being called for a partially-grouped path, though, we need to override
+ * the rowcount estimate. (It's not clear that the particular value we're
+ * using here is actually best, but the underlying rel has no estimate so
+ * we must do something.)
+ */
+void
+generate_gather_paths(PlannerInfo *root, RelOptInfo *rel, bool override_rows)
+{
+ Path *cheapest_partial_path;
+ Path *simple_gather_path;
+ ListCell *lc;
+ double rows;
+ double *rowsp = NULL;
+
+ /* If there are no partial paths, there's nothing to do here. */
+ if (rel->partial_pathlist == NIL)
+ return;
+
+ /* Should we override the rel's rowcount estimate? */
+ if (override_rows)
+ rowsp = &rows;
+
+ /*
+ * The output of Gather is always unsorted, so there's only one partial
+ * path of interest: the cheapest one. That will be the one at the front
+ * of partial_pathlist because of the way add_partial_path works.
+ */
+ cheapest_partial_path = linitial(rel->partial_pathlist);
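+
+ /*
+ * Note that a partial path's row estimate is per worker, so scaling by
+ * the planned worker count below gives only a rough estimate of the
+ * Gather node's total output; it is used only when the caller asked us
+ * to override the rel's own rowcount estimate.
+ */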
+ rows =
+ cheapest_partial_path->rows * cheapest_partial_path->parallel_workers;
+ simple_gather_path = (Path *)
+ create_gather_path(root, rel, cheapest_partial_path, rel->reltarget,
+ NULL, rowsp);
+ add_path(rel, simple_gather_path);
+
+ /*
+ * For each useful ordering, we can consider an order-preserving Gather
+ * Merge.
+ */
+ foreach(lc, rel->partial_pathlist)
+ {
+ Path *subpath = (Path *) lfirst(lc);
+ GatherMergePath *path;
+
+ if (subpath->pathkeys == NIL)
+ continue;
+
+ rows = subpath->rows * subpath->parallel_workers;
+ path = create_gather_merge_path(root, rel, subpath, rel->reltarget,
+ subpath->pathkeys, NULL, rowsp);
+ add_path(rel, &path->path);
+ }
+}
+
+/*
+ * get_useful_pathkeys_for_relation
+ * Determine which orderings of a relation might be useful.
+ *
+ * Getting data in sorted order can be useful either because the requested
+ * order matches the final output ordering for the overall query we're
+ * planning, or because it enables an efficient merge join. Here, we try
+ * to figure out which pathkeys to consider.
+ *
+ * This allows us to do incremental sort on top of an index scan under a gather
+ * merge node, i.e. parallelized.
+ *
+ * If require_parallel_safe is true, we also require the expressions to
+ * be parallel safe (which allows pushing the sort below Gather Merge).
+ *
+ * XXX At the moment this can only ever return a list with a single element,
+ * because it looks at query_pathkeys only. So we might return the pathkeys
+ * directly, but it seems plausible we'll want to consider other orderings
+ * in the future. For example, we might want to consider pathkeys useful for
+ * merge joins.
+ */
+static List *
+get_useful_pathkeys_for_relation(PlannerInfo *root, RelOptInfo *rel,
+ bool require_parallel_safe)
+{
+ List *useful_pathkeys_list = NIL;
+
+ /*
+ * Considering query_pathkeys is always worth it, because it might allow
+ * us to avoid a total sort when we have a partially presorted path
+ * available or to push the total sort into the parallel portion of the
+ * query.
+ */
+ if (root->query_pathkeys)
+ {
+ ListCell *lc;
+ int npathkeys = 0; /* useful pathkeys */
+
+ foreach(lc, root->query_pathkeys)
+ {
+ PathKey *pathkey = (PathKey *) lfirst(lc);
+ EquivalenceClass *pathkey_ec = pathkey->pk_eclass;
+
+ /*
+ * We can only build a sort for pathkeys that contain a
+ * safe-to-compute-early EC member computable from the current
+ * relation's reltarget, so ignore the remainder of the list as
+ * soon as we find a pathkey without such a member.
+ *
+ * It's still worthwhile to return any prefix of the pathkeys list
+ * that meets this requirement, as we may be able to do an
+ * incremental sort.
+ *
+ * If requested, ensure the sort expression is parallel-safe too.
+ */
+ if (!relation_can_be_sorted_early(root, rel, pathkey_ec,
+ require_parallel_safe))
+ break;
+
+ npathkeys++;
+ }
+
+ /*
+ * If the whole query_pathkeys list matches, append it directly so that
+ * pathkey lists can be compared cheaply by list pointer. If we have to
+ * truncate the pathkeys, we must make a copy first.
+ */
+ if (npathkeys == list_length(root->query_pathkeys))
+ useful_pathkeys_list = lappend(useful_pathkeys_list,
+ root->query_pathkeys);
+ else if (npathkeys > 0)
+ useful_pathkeys_list = lappend(useful_pathkeys_list,
+ list_truncate(list_copy(root->query_pathkeys),
+ npathkeys));
+ }
+
+ return useful_pathkeys_list;
+}
+
+/*
+ * generate_useful_gather_paths
+ * Generate parallel access paths for a relation by pushing a Gather or
+ * Gather Merge on top of a partial path.
+ *
+ * Unlike plain generate_gather_paths, this not only looks at the pathkeys of
+ * the input paths (aiming to preserve their ordering), but also considers
+ * orderings that might be useful to nodes above the Gather Merge node, and
+ * tries to add a sort (regular or incremental) to provide them.
+ */
+void
+generate_useful_gather_paths(PlannerInfo *root, RelOptInfo *rel, bool override_rows)
+{
+ ListCell *lc;
+ double rows;
+ double *rowsp = NULL;
+ List *useful_pathkeys_list = NIL;
+ Path *cheapest_partial_path = NULL;
+
+ /* If there are no partial paths, there's nothing to do here. */
+ if (rel->partial_pathlist == NIL)
+ return;
+
+ /* Should we override the rel's rowcount estimate? */
+ if (override_rows)
+ rowsp = &rows;
+
+ /* generate the regular gather (merge) paths */
+ generate_gather_paths(root, rel, override_rows);
+
+ /* consider incremental sort for interesting orderings */
+ useful_pathkeys_list = get_useful_pathkeys_for_relation(root, rel, true);
+
+ /* used for explicit (full) sort paths */
+ cheapest_partial_path = linitial(rel->partial_pathlist);
+
+ /*
+ * Consider sorted paths for each interesting ordering. We generate both
+ * incremental and full sort.
+ */
+ foreach(lc, useful_pathkeys_list)
+ {
+ List *useful_pathkeys = lfirst(lc);
+ ListCell *lc2;
+ bool is_sorted;
+ int presorted_keys;
+
+ foreach(lc2, rel->partial_pathlist)
+ {
+ Path *subpath = (Path *) lfirst(lc2);
+ GatherMergePath *path;
+
+ is_sorted = pathkeys_count_contained_in(useful_pathkeys,
+ subpath->pathkeys,
+ &presorted_keys);
+
+ /*
+ * We don't need to consider the case where a subpath is already
+ * fully sorted because generate_gather_paths already creates a
+ * gather merge path for every subpath that has pathkeys present.
+ *
+ * But since the subpath is already sorted, we know we don't need
+ * to consider adding a sort (full or incremental) on top of it,
+ * so we can continue here.
+ */
+ if (is_sorted)
+ continue;
+
+ /*
+ * Consider regular sort for the cheapest partial path (for each
+ * useful pathkeys). We know the path is not sorted, because we'd
+ * not get here otherwise.
+ *
+ * This is not redundant with the gather paths created in
+ * generate_gather_paths, because that doesn't generate ordered
+ * output. Here we add an explicit sort to match the useful
+ * ordering.
+ */
+ if (cheapest_partial_path == subpath)
+ {
+ Path *tmp;
+
+ tmp = (Path *) create_sort_path(root,
+ rel,
+ subpath,
+ useful_pathkeys,
+ -1.0);
+
+ rows = tmp->rows * tmp->parallel_workers;
+
+ path = create_gather_merge_path(root, rel,
+ tmp,
+ rel->reltarget,
+ tmp->pathkeys,
+ NULL,
+ rowsp);
+
+ add_path(rel, &path->path);
+
+ /* Fall through */
+ }
+
+ /*
+ * Consider incremental sort, but only when the subpath is already
+ * partially sorted on a pathkey prefix.
+ */
+ if (enable_incremental_sort && presorted_keys > 0)
+ {
+ Path *tmp;
+
+ /*
+ * We should have already excluded pathkeys of length 1
+ * because then presorted_keys > 0 would imply is_sorted was
+ * true.
+ */
+ Assert(list_length(useful_pathkeys) != 1);
+
+ tmp = (Path *) create_incremental_sort_path(root,
+ rel,
+ subpath,
+ useful_pathkeys,
+ presorted_keys,
+ -1);
+
+ path = create_gather_merge_path(root, rel,
+ tmp,
+ rel->reltarget,
+ tmp->pathkeys,
+ NULL,
+ rowsp);
+
+ add_path(rel, &path->path);
+ }
+ }
+ }
+}
+
+/*
+ * make_rel_from_joinlist
+ * Build access paths using a "joinlist" to guide the join path search.
+ *
+ * See comments for deconstruct_jointree() for definition of the joinlist
+ * data structure.
+ */
+static RelOptInfo *
+make_rel_from_joinlist(PlannerInfo *root, List *joinlist)
+{
+ int levels_needed;
+ List *initial_rels;
+ ListCell *jl;
+
+ /*
+ * Count the number of child joinlist nodes. This is the depth of the
+ * dynamic-programming algorithm we must employ to consider all ways of
+ * joining the child nodes.
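+ *
+ * For example, a query that simply joins A, B and C (with no nested
+ * sub-joinlists) arrives here as a list of three RangeTblRefs, giving
+ * levels_needed = 3.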
+ */
+ levels_needed = list_length(joinlist);
+
+ if (levels_needed <= 0)
+ return NULL; /* nothing to do? */
+
+ /*
+ * Construct a list of rels corresponding to the child joinlist nodes.
+ * This may contain both base rels and rels constructed according to
+ * sub-joinlists.
+ */
+ initial_rels = NIL;
+ foreach(jl, joinlist)
+ {
+ Node *jlnode = (Node *) lfirst(jl);
+ RelOptInfo *thisrel;
+
+ if (IsA(jlnode, RangeTblRef))
+ {
+ int varno = ((RangeTblRef *) jlnode)->rtindex;
+
+ thisrel = find_base_rel(root, varno);
+ }
+ else if (IsA(jlnode, List))
+ {
+ /* Recurse to handle subproblem */
+ thisrel = make_rel_from_joinlist(root, (List *) jlnode);
+ }
+ else
+ {
+ elog(ERROR, "unrecognized joinlist node type: %d",
+ (int) nodeTag(jlnode));
+ thisrel = NULL; /* keep compiler quiet */
+ }
+
+ initial_rels = lappend(initial_rels, thisrel);
+ }
+
+ if (levels_needed == 1)
+ {
+ /*
+ * Single joinlist node, so we're done.
+ */
+ return (RelOptInfo *) linitial(initial_rels);
+ }
+ else
+ {
+ /*
+ * Consider the different orders in which we could join the rels,
+ * using a plugin, GEQO, or the regular join search code.
+ *
+ * We put the initial_rels list into a PlannerInfo field because
+ * has_legal_joinclause() needs to look at it (ugly :-().
+ */
+ root->initial_rels = initial_rels;
+
+ if (join_search_hook)
+ return (*join_search_hook) (root, levels_needed, initial_rels);
+ else if (enable_geqo && levels_needed >= geqo_threshold)
+ return geqo(root, levels_needed, initial_rels);
+ else
+ return standard_join_search(root, levels_needed, initial_rels);
+ }
+}
+
+/*
+ * standard_join_search
+ * Find possible joinpaths for a query by successively finding ways
+ * to join component relations into join relations.
+ *
+ * 'levels_needed' is the number of iterations needed, ie, the number of
+ * independent jointree items in the query. This is > 1.
+ *
+ * 'initial_rels' is a list of RelOptInfo nodes for each independent
+ * jointree item. These are the components to be joined together.
+ * Note that levels_needed == list_length(initial_rels).
+ *
+ * Returns the final level of join relations, i.e., the relation that is
+ * the result of joining all the original relations together.
+ * At least one implementation path must be provided for this relation and
+ * all required sub-relations.
+ *
+ * To support loadable plugins that modify planner behavior by changing the
+ * join searching algorithm, we provide a hook variable that lets a plugin
+ * replace or supplement this function. Any such hook must return the same
+ * final join relation as the standard code would, but it might have a
+ * different set of implementation paths attached, and only the sub-joinrels
+ * needed for these paths need have been instantiated.
+ *
+ * Note to plugin authors: the functions invoked during standard_join_search()
+ * modify root->join_rel_list and root->join_rel_hash. If you want to do more
+ * than one join-order search, you'll probably need to save and restore the
+ * original states of those data structures. See geqo_eval() for an example.
+ */
+RelOptInfo *
+standard_join_search(PlannerInfo *root, int levels_needed, List *initial_rels)
+{
+ int lev;
+ RelOptInfo *rel;
+
+ /*
+ * This function cannot be invoked recursively within any one planning
+ * problem, so join_rel_level[] can't be in use already.
+ */
+ Assert(root->join_rel_level == NULL);
+
+ /*
+ * We employ a simple "dynamic programming" algorithm: we first find all
+ * ways to build joins of two jointree items, then all ways to build joins
+ * of three items (from two-item joins and single items), then four-item
+ * joins, and so on until we have considered all ways to join all the
+ * items into one rel.
+ *
+ * root->join_rel_level[j] is a list of all the j-item rels. Initially we
+ * set root->join_rel_level[1] to represent all the single-jointree-item
+ * relations.
+ */
+ root->join_rel_level = (List **) palloc0((levels_needed + 1) * sizeof(List *));
+
+ root->join_rel_level[1] = initial_rels;
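+
+ /*
+ * For example, with initial_rels = {A, B, C}, level 2 will hold the
+ * two-item joinrels that join_search_one_level chooses to build (such
+ * as {A B}, {A C}, {B C}, subject to join-order restrictions and its
+ * heuristics about clauseless joins), and level 3 the single joinrel
+ * {A B C}.
+ */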
+
+ for (lev = 2; lev <= levels_needed; lev++)
+ {
+ ListCell *lc;
+
+ /*
+ * Determine all possible pairs of relations to be joined at this
+ * level, and build paths for making each one from every available
+ * pair of lower-level relations.
+ */
+ join_search_one_level(root, lev);
+
+ /*
+ * Run generate_partitionwise_join_paths() and
+ * generate_useful_gather_paths() for each just-processed joinrel. We
+ * could not do this earlier because both regular and partial paths
+ * can get added to a particular joinrel multiple times within
+ * join_search_one_level.
+ *
+ * After that, we're done creating paths for the joinrel, so run
+ * set_cheapest().
+ */
+ foreach(lc, root->join_rel_level[lev])
+ {
+ rel = (RelOptInfo *) lfirst(lc);
+
+ /* Create paths for partitionwise joins. */
+ generate_partitionwise_join_paths(root, rel);
+
+ /*
+ * Except for the topmost scan/join rel, consider gathering
+ * partial paths. We'll do the same for the topmost scan/join rel
+ * once we know the final targetlist (see grouping_planner).
+ */
+ if (!bms_equal(rel->relids, root->all_baserels))
+ generate_useful_gather_paths(root, rel, false);
+
+ /* Find and save the cheapest paths for this rel */
+ set_cheapest(rel);
+
+#ifdef OPTIMIZER_DEBUG
+ debug_print_rel(root, rel);
+#endif
+ }
+ }
+
+ /*
+ * We should have a single rel at the final level.
+ */
+ if (root->join_rel_level[levels_needed] == NIL)
+ elog(ERROR, "failed to build any %d-way joins", levels_needed);
+ Assert(list_length(root->join_rel_level[levels_needed]) == 1);
+
+ rel = (RelOptInfo *) linitial(root->join_rel_level[levels_needed]);
+
+ root->join_rel_level = NULL;
+
+ return rel;
+}
+
+/*****************************************************************************
+ * PUSHING QUALS DOWN INTO SUBQUERIES
+ *****************************************************************************/
+
+/*
+ * subquery_is_pushdown_safe - is a subquery safe for pushing down quals?
+ *
+ * subquery is the particular component query being checked. topquery
+ * is the top component of a set-operations tree (the same Query if no
+ * set-op is involved).
+ *
+ * Conditions checked here:
+ *
+ * 1. If the subquery has a LIMIT clause, we must not push down any quals,
+ * since that could change the set of rows returned.
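+ * For example, pushing "x = 5" into "(SELECT * FROM t LIMIT 10)" would
+ * apply the filter before the LIMIT instead of after it, so a different
+ * set of rows could be returned.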
+ *
+ * 2. If the subquery contains EXCEPT or EXCEPT ALL set ops we cannot push
+ * quals into it, because that could change the results.
+ *
+ * 3. If the subquery uses DISTINCT, we cannot push volatile quals into it.
+ * This is because upper-level quals should semantically be evaluated only
+ * once per distinct row, not once per original row, and if the qual is
+ * volatile then extra evaluations could change the results. (This issue
+ * does not apply to other forms of aggregation such as GROUP BY, because
+ * when those are present we push into HAVING not WHERE, so that the quals
+ * are still applied after aggregation.)
+ *
+ * 4. If the subquery contains window functions, we cannot push volatile quals
+ * into it. The issue here is a bit different from DISTINCT: a volatile qual
+ * might succeed for some rows of a window partition and fail for others,
+ * thereby changing the partition contents and thus the window functions'
+ * results for rows that remain.
+ *
+ * 5. If the subquery contains any set-returning functions in its targetlist,
+ * we cannot push volatile quals into it. That would push them below the SRFs
+ * and thereby change the number of times they are evaluated. Also, a
+ * volatile qual could succeed for some SRF output rows and fail for others,
+ * a behavior that cannot occur if it's evaluated before SRF expansion.
+ *
+ * 6. If the subquery has nonempty grouping sets, we cannot push down any
+ * quals. The concern here is that a qual referencing a "constant" grouping
+ * column could get constant-folded, which would be improper because the value
+ * is potentially nullable by grouping-set expansion. This restriction could
+ * be removed if we had a parsetree representation that shows that such
+ * grouping columns are not really constant. (There are other ideas that
+ * could be used to relax this restriction, but that's the approach most
+ * likely to get taken in the future. Note that there's not much to be gained
+ * so long as subquery_planner can't move HAVING clauses to WHERE within such
+ * a subquery.)
+ *
+ * In addition, we make several checks on the subquery's output columns to see
+ * if it is safe to reference them in pushed-down quals. If output column k
+ * is found to be unsafe to reference, we set the reason for that inside
+ * safetyInfo->unsafeFlags[k], but we don't reject the subquery overall since
+ * column k might not be referenced by some/all quals. The unsafeFlags[]
+ * array will be consulted later by qual_is_pushdown_safe(). It's better to
+ * do it this way than to make the checks directly in qual_is_pushdown_safe(),
+ * because when the subquery involves set operations we have to check the
+ * output expressions in each arm of the set op.
+ *
+ * Note: pushing quals into a DISTINCT subquery is theoretically dubious:
+ * we're effectively assuming that the quals cannot distinguish values that
+ * the DISTINCT's equality operator sees as equal, yet there are many
+ * counterexamples to that assumption. However use of such a qual with a
+ * DISTINCT subquery would be unsafe anyway, since there's no guarantee which
+ * "equal" value will be chosen as the output value by the DISTINCT operation.
+ * So we don't worry too much about that. Another objection is that if the
+ * qual is expensive to evaluate, running it for each original row might cost
+ * more than we save by eliminating rows before the DISTINCT step. But it
+ * would be very hard to estimate that at this stage, and in practice pushdown
+ * seldom seems to make things worse, so we ignore that problem too.
+ *
+ * Note: likewise, pushing quals into a subquery with window functions is a
+ * bit dubious: the quals might remove some rows of a window partition while
+ * leaving others, causing changes in the window functions' results for the
+ * surviving rows. We insist that such a qual reference only partitioning
+ * columns, but again that only protects us if the qual does not distinguish
+ * values that the partitioning equality operator sees as equal. The risks
+ * here are perhaps larger than for DISTINCT, since no de-duplication of rows
+ * occurs and thus there is no theoretical problem with such a qual. But
+ * we'll do this anyway because the potential performance benefits are very
+ * large, and we've seen no field complaints about the longstanding comparable
+ * behavior with DISTINCT.
+ */
+static bool
+subquery_is_pushdown_safe(Query *subquery, Query *topquery,
+ pushdown_safety_info *safetyInfo)
+{
+ SetOperationStmt *topop;
+
+ /* Check point 1 */
+ if (subquery->limitOffset != NULL || subquery->limitCount != NULL)
+ return false;
+
+ /* Check point 6 */
+ if (subquery->groupClause && subquery->groupingSets)
+ return false;
+
+ /* Check points 3, 4, and 5 */
+ if (subquery->distinctClause ||
+ subquery->hasWindowFuncs ||
+ subquery->hasTargetSRFs)
+ safetyInfo->unsafeVolatile = true;
+
+ /*
+ * If we're at a leaf query, check for unsafe expressions in its target
+ * list, and mark any reasons why they're unsafe in unsafeFlags[].
+ * (Non-leaf nodes in setop trees have only simple Vars in their tlists,
+ * so no need to check them.)
+ */
+ if (subquery->setOperations == NULL)
+ check_output_expressions(subquery, safetyInfo);
+
+ /* Are we at top level, or looking at a setop component? */
+ if (subquery == topquery)
+ {
+ /* Top level, so check any component queries */
+ if (subquery->setOperations != NULL)
+ if (!recurse_pushdown_safe(subquery->setOperations, topquery,
+ safetyInfo))
+ return false;
+ }
+ else
+ {
+ /* Setop component must not have more components (too weird) */
+ if (subquery->setOperations != NULL)
+ return false;
+ /* Check whether setop component output types match top level */
+ topop = castNode(SetOperationStmt, topquery->setOperations);
+ Assert(topop);
+ compare_tlist_datatypes(subquery->targetList,
+ topop->colTypes,
+ safetyInfo);
+ }
+ return true;
+}
+
+/*
+ * Helper routine to recurse through setOperations tree
+ */
+static bool
+recurse_pushdown_safe(Node *setOp, Query *topquery,
+ pushdown_safety_info *safetyInfo)
+{
+ if (IsA(setOp, RangeTblRef))
+ {
+ RangeTblRef *rtr = (RangeTblRef *) setOp;
+ RangeTblEntry *rte = rt_fetch(rtr->rtindex, topquery->rtable);
+ Query *subquery = rte->subquery;
+
+ Assert(subquery != NULL);
+ return subquery_is_pushdown_safe(subquery, topquery, safetyInfo);
+ }
+ else if (IsA(setOp, SetOperationStmt))
+ {
+ SetOperationStmt *op = (SetOperationStmt *) setOp;
+
+ /* EXCEPT is no good (point 2 for subquery_is_pushdown_safe) */
+ if (op->op == SETOP_EXCEPT)
+ return false;
+ /* Else recurse */
+ if (!recurse_pushdown_safe(op->larg, topquery, safetyInfo))
+ return false;
+ if (!recurse_pushdown_safe(op->rarg, topquery, safetyInfo))
+ return false;
+ }
+ else
+ {
+ elog(ERROR, "unrecognized node type: %d",
+ (int) nodeTag(setOp));
+ }
+ return true;
+}
+
+/*
+ * check_output_expressions - check subquery's output expressions for safety
+ *
+ * There are several cases in which it's unsafe to push down an upper-level
+ * qual if it references a particular output column of a subquery. We check
+ * each output column of the subquery and set flags in unsafeFlags[k] when we
+ * see that column is unsafe for a pushed-down qual to reference. The
+ * conditions checked here are:
+ *
+ * 1. We must not push down any quals that refer to subselect outputs that
+ * return sets, else we'd introduce functions-returning-sets into the
+ * subquery's WHERE/HAVING quals.
+ *
+ * 2. We must not push down any quals that refer to subselect outputs that
+ * contain volatile functions, for fear of introducing strange results due
+ * to multiple evaluation of a volatile function.
+ *
+ * 3. If the subquery uses DISTINCT ON, we must not push down any quals that
+ * refer to non-DISTINCT output columns, because that could change the set
+ * of rows returned. (This condition is vacuous for DISTINCT, because then
+ * there are no non-DISTINCT output columns, so we needn't check. Note that
+ * subquery_is_pushdown_safe already reported that we can't use volatile
+ * quals if there's DISTINCT or DISTINCT ON.)
+ *
+ * 4. If the subquery has any window functions, we must not push down quals
+ * that reference any output columns that are not listed in all the subquery's
+ * window PARTITION BY clauses. We can push down quals that use only
+ * partitioning columns because they should succeed or fail identically for
+ * every row of any one window partition, and totally excluding some
+ * partitions will not change a window function's results for remaining
+ * partitions. (Again, this also requires nonvolatile quals, but
+ * subquery_is_pushdown_safe handles that.) Subquery columns marked as
+ * unsafe for this reason can still have WindowClause run conditions pushed
+ * down.
+ */
+static void
+check_output_expressions(Query *subquery, pushdown_safety_info *safetyInfo)
+{
+ ListCell *lc;
+
+ foreach(lc, subquery->targetList)
+ {
+ TargetEntry *tle = (TargetEntry *) lfirst(lc);
+
+ if (tle->resjunk)
+ continue; /* ignore resjunk columns */
+
+ /* Functions returning sets are unsafe (point 1) */
+ if (subquery->hasTargetSRFs &&
+ (safetyInfo->unsafeFlags[tle->resno] &
+ UNSAFE_HAS_SET_FUNC) == 0 &&
+ expression_returns_set((Node *) tle->expr))
+ {
+ safetyInfo->unsafeFlags[tle->resno] |= UNSAFE_HAS_SET_FUNC;
+ continue;
+ }
+
+ /* Volatile functions are unsafe (point 2) */
+ if ((safetyInfo->unsafeFlags[tle->resno] &
+ UNSAFE_HAS_VOLATILE_FUNC) == 0 &&
+ contain_volatile_functions((Node *) tle->expr))
+ {
+ safetyInfo->unsafeFlags[tle->resno] |= UNSAFE_HAS_VOLATILE_FUNC;
+ continue;
+ }
+
+ /* If subquery uses DISTINCT ON, check point 3 */
+ if (subquery->hasDistinctOn &&
+ (safetyInfo->unsafeFlags[tle->resno] &
+ UNSAFE_NOTIN_DISTINCTON_CLAUSE) == 0 &&
+ !targetIsInSortList(tle, InvalidOid, subquery->distinctClause))
+ {
+ /* non-DISTINCT column, so mark it unsafe */
+ safetyInfo->unsafeFlags[tle->resno] |= UNSAFE_NOTIN_DISTINCTON_CLAUSE;
+ continue;
+ }
+
+ /* If subquery uses window functions, check point 4 */
+ if (subquery->hasWindowFuncs &&
+ (safetyInfo->unsafeFlags[tle->resno] &
+ UNSAFE_NOTIN_PARTITIONBY_CLAUSE) == 0 &&
+ !targetIsInAllPartitionLists(tle, subquery))
+ {
+ /* not present in all PARTITION BY clauses, so mark it unsafe */
+ safetyInfo->unsafeFlags[tle->resno] |= UNSAFE_NOTIN_PARTITIONBY_CLAUSE;
+ continue;
+ }
+ }
+}
+
+/*
+ * For subqueries using UNION/UNION ALL/INTERSECT/INTERSECT ALL, we can
+ * push quals into each component query, but the quals can only reference
+ * subquery columns that suffer no type coercions in the set operation.
+ * Otherwise there are possible semantic gotchas. So, we check the
+ * component queries to see if any of them have output types different from
+ * the top-level setop outputs. We set the UNSAFE_TYPE_MISMATCH bit in
+ * unsafeFlags[k] if column k has different type in any component.
+ *
+ * We don't have to care about typmods here: the only allowed difference
+ * between set-op input and output typmods is that the input has a specific
+ * typmod while the output has -1, and that does not require a coercion.
+ *
+ * tlist is a subquery tlist.
+ * colTypes is an OID list of the top-level setop's output column types.
+ * safetyInfo is the pushdown_safety_info to set unsafeFlags[] for.
+ */
+static void
+compare_tlist_datatypes(List *tlist, List *colTypes,
+ pushdown_safety_info *safetyInfo)
+{
+ ListCell *l;
+ ListCell *colType = list_head(colTypes);
+
+ foreach(l, tlist)
+ {
+ TargetEntry *tle = (TargetEntry *) lfirst(l);
+
+ if (tle->resjunk)
+ continue; /* ignore resjunk columns */
+ if (colType == NULL)
+ elog(ERROR, "wrong number of tlist entries");
+ if (exprType((Node *) tle->expr) != lfirst_oid(colType))
+ safetyInfo->unsafeFlags[tle->resno] |= UNSAFE_TYPE_MISMATCH;
+ colType = lnext(colTypes, colType);
+ }
+ if (colType != NULL)
+ elog(ERROR, "wrong number of tlist entries");
+}
+
+/*
+ * targetIsInAllPartitionLists
+ * True if the TargetEntry is listed in the PARTITION BY clause
+ * of every window defined in the query.
+ *
+ * It would be safe to ignore windows not actually used by any window
+ * function, but it's not easy to get that info at this stage; and it's
+ * unlikely to be useful to spend any extra cycles getting it, since
+ * unreferenced window definitions are probably infrequent in practice.
+ */
+static bool
+targetIsInAllPartitionLists(TargetEntry *tle, Query *query)
+{
+ ListCell *lc;
+
+ foreach(lc, query->windowClause)
+ {
+ WindowClause *wc = (WindowClause *) lfirst(lc);
+
+ if (!targetIsInSortList(tle, InvalidOid, wc->partitionClause))
+ return false;
+ }
+ return true;
+}
+
+/*
+ * qual_is_pushdown_safe - is a particular rinfo safe to push down?
+ *
+ * rinfo is a restriction clause applying to the given subquery (whose RTE
+ * has index rti in the parent query).
+ *
+ * Conditions checked here:
+ *
+ * 1. rinfo's clause must not contain any SubPlans (mainly because it's
+ * unclear that it will work correctly: SubLinks will already have been
+ * transformed into SubPlans in the qual, but not in the subquery). Note that
+ * SubLinks that transform to initplans are safe, and will be accepted here
+ * because what we'll see in the qual is just a Param referencing the initplan
+ * output.
+ *
+ * 2. If unsafeVolatile is set, rinfo's clause must not contain any volatile
+ * functions.
+ *
+ * 3. If unsafeLeaky is set, rinfo's clause must not contain any leaky
+ * functions that are passed Var nodes, and therefore might reveal values from
+ * the subquery as side effects.
+ *
+ * 4. rinfo's clause must not refer to the whole-row output of the subquery
+ * (since there is no easy way to name that within the subquery itself).
+ *
+ * 5. rinfo's clause must not refer to any subquery output columns that were
+ * found to be unsafe to reference by subquery_is_pushdown_safe().
+ */
+static pushdown_safe_type
+qual_is_pushdown_safe(Query *subquery, Index rti, RestrictInfo *rinfo,
+ pushdown_safety_info *safetyInfo)
+{
+ pushdown_safe_type safe = PUSHDOWN_SAFE;
+ Node *qual = (Node *) rinfo->clause;
+ List *vars;
+ ListCell *vl;
+
+ /* Refuse subselects (point 1) */
+ if (contain_subplans(qual))
+ return PUSHDOWN_UNSAFE;
+
+ /* Refuse volatile quals if we found they'd be unsafe (point 2) */
+ if (safetyInfo->unsafeVolatile &&
+ contain_volatile_functions((Node *) rinfo))
+ return PUSHDOWN_UNSAFE;
+
+ /* Refuse leaky quals if told to (point 3) */
+ if (safetyInfo->unsafeLeaky &&
+ contain_leaked_vars(qual))
+ return PUSHDOWN_UNSAFE;
+
+ /*
+ * It would be unsafe to push down window function calls, but at least for
+ * the moment we could never see any in a qual anyhow. (The same applies
+ * to aggregates, which we check for in pull_var_clause below.)
+ */
+ Assert(!contain_window_function(qual));
+
+ /*
+ * Examine all Vars used in clause. Since it's a restriction clause, all
+ * such Vars must refer to subselect output columns ... unless this is
+ * part of a LATERAL subquery, in which case there could be lateral
+ * references.
+ */
+ vars = pull_var_clause(qual, PVC_INCLUDE_PLACEHOLDERS);
+ foreach(vl, vars)
+ {
+ Var *var = (Var *) lfirst(vl);
+
+ /*
+ * XXX Punt if we find any PlaceHolderVars in the restriction clause.
+ * It's not clear whether a PHV could safely be pushed down, and even
+ * less clear whether such a situation could arise in any cases of
+ * practical interest anyway. So for the moment, just refuse to push
+ * down.
+ */
+ if (!IsA(var, Var))
+ {
+ safe = PUSHDOWN_UNSAFE;
+ break;
+ }
+
+ /*
+ * Punt if we find any lateral references. It would be safe to push
+ * these down, but we'd have to convert them into outer references,
+ * which subquery_push_qual lacks the infrastructure to do. The case
+ * arises so seldom that it doesn't seem worth working hard on.
+ */
+ if (var->varno != rti)
+ {
+ safe = PUSHDOWN_UNSAFE;
+ break;
+ }
+
+ /* Subqueries have no system columns */
+ Assert(var->varattno >= 0);
+
+ /* Check point 4 */
+ if (var->varattno == 0)
+ {
+ safe = PUSHDOWN_UNSAFE;
+ break;
+ }
+
+ /* Check point 5 */
+ if (safetyInfo->unsafeFlags[var->varattno] != 0)
+ {
+ if (safetyInfo->unsafeFlags[var->varattno] &
+ (UNSAFE_HAS_VOLATILE_FUNC | UNSAFE_HAS_SET_FUNC |
+ UNSAFE_NOTIN_DISTINCTON_CLAUSE | UNSAFE_TYPE_MISMATCH))
+ {
+ safe = PUSHDOWN_UNSAFE;
+ break;
+ }
+ else
+ {
+ /* UNSAFE_NOTIN_PARTITIONBY_CLAUSE is ok for run conditions */
+ safe = PUSHDOWN_WINDOWCLAUSE_RUNCOND;
+ /* don't break, we might find another Var that's unsafe */
+ }
+ }
+ }
+
+ list_free(vars);
+
+ return safe;
+}
+
+/*
+ * subquery_push_qual - push down a qual that we have determined is safe
+ */
+static void
+subquery_push_qual(Query *subquery, RangeTblEntry *rte, Index rti, Node *qual)
+{
+ if (subquery->setOperations != NULL)
+ {
+ /* Recurse to push it separately to each component query */
+ recurse_push_qual(subquery->setOperations, subquery,
+ rte, rti, qual);
+ }
+ else
+ {
+ /*
+ * We need to replace Vars in the qual (which must refer to outputs of
+ * the subquery) with copies of the subquery's targetlist expressions.
+ * Note that at this point, any uplevel Vars in the qual should have
+ * been replaced with Params, so they need no work.
+ *
+ * This step also ensures that when we are pushing into a setop tree,
+ * each component query gets its own copy of the qual.
+ */
+ qual = ReplaceVarsFromTargetList(qual, rti, 0, rte,
+ subquery->targetList,
+ REPLACEVARS_REPORT_ERROR, 0,
+ &subquery->hasSubLinks);
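+
+ /*
+ * For instance, a pushed-down qual "ss.x > 10" over a subquery whose
+ * targetlist computes x as "a + b" becomes "a + b > 10" inside the
+ * subquery.
+ */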
+
+ /*
+ * Now attach the qual to the proper place: normally WHERE, but if the
+ * subquery uses grouping or aggregation, put it in HAVING (since the
+ * qual really refers to the group-result rows).
+ */
+ if (subquery->hasAggs ||
+ subquery->groupClause ||
+ subquery->groupingSets ||
+ subquery->havingQual)
+ subquery->havingQual = make_and_qual(subquery->havingQual, qual);
+ else
+ subquery->jointree->quals =
+ make_and_qual(subquery->jointree->quals, qual);
+
+ /*
+ * We need not change the subquery's hasAggs or hasSubLinks flags,
+ * since we can't be pushing down any aggregates that weren't there
+ * before, and we don't push down subselects at all.
+ */
+ }
+}
+
+/*
+ * Helper routine to recurse through setOperations tree
+ */
+static void
+recurse_push_qual(Node *setOp, Query *topquery,
+ RangeTblEntry *rte, Index rti, Node *qual)
+{
+ if (IsA(setOp, RangeTblRef))
+ {
+ RangeTblRef *rtr = (RangeTblRef *) setOp;
+ RangeTblEntry *subrte = rt_fetch(rtr->rtindex, topquery->rtable);
+ Query *subquery = subrte->subquery;
+
+ Assert(subquery != NULL);
+ subquery_push_qual(subquery, rte, rti, qual);
+ }
+ else if (IsA(setOp, SetOperationStmt))
+ {
+ SetOperationStmt *op = (SetOperationStmt *) setOp;
+
+ recurse_push_qual(op->larg, topquery, rte, rti, qual);
+ recurse_push_qual(op->rarg, topquery, rte, rti, qual);
+ }
+ else
+ {
+ elog(ERROR, "unrecognized node type: %d",
+ (int) nodeTag(setOp));
+ }
+}
+
+/*****************************************************************************
+ * SIMPLIFYING SUBQUERY TARGETLISTS
+ *****************************************************************************/
+
+/*
+ * remove_unused_subquery_outputs
+ * Remove subquery targetlist items we don't need
+ *
+ * It's possible, even likely, that the upper query does not read all the
+ * output columns of the subquery. We can remove any such outputs that are
+ * not needed by the subquery itself (e.g., as sort/group columns) and do not
+ * affect semantics otherwise (e.g., volatile functions can't be removed).
+ * This is useful not only because we might be able to remove expensive-to-
+ * compute expressions, but because deletion of output columns might allow
+ * optimizations such as join removal to occur within the subquery.
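+ *
+ * For example, if the upper query reads only column "a" of a subquery that
+ * also computes "b + 1" and "random()" in its targetlist, the "b + 1"
+ * output can be nulled out, but the volatile "random()" must be kept.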
+ *
+ * extra_used_attrs can be passed as non-NULL to mark any columns (offset by
+ * FirstLowInvalidHeapAttributeNumber) that we should not remove. This
+ * parameter is modified by the function, so callers must make a copy if they
+ * need to use the passed in Bitmapset after calling this function.
+ *
+ * To avoid affecting column numbering in the targetlist, we don't physically
+ * remove unused tlist entries, but rather replace their expressions with NULL
+ * constants. This is implemented by modifying subquery->targetList.
+ */
+static void
+remove_unused_subquery_outputs(Query *subquery, RelOptInfo *rel,
+ Bitmapset *extra_used_attrs)
+{
+ Bitmapset *attrs_used;
+ ListCell *lc;
+
+ /*
+ * Just point directly to extra_used_attrs. No need to bms_copy as none of
+ * the current callers use the Bitmapset after calling this function.
+ */
+ attrs_used = extra_used_attrs;
+
+ /*
+ * Do nothing if subquery has UNION/INTERSECT/EXCEPT: in principle we
+ * could update all the child SELECTs' tlists, but it seems not worth the
+ * trouble presently.
+ */
+ if (subquery->setOperations)
+ return;
+
+ /*
+ * If subquery has regular DISTINCT (not DISTINCT ON), we're wasting our
+ * time: all its output columns must be used in the distinctClause.
+ */
+ if (subquery->distinctClause && !subquery->hasDistinctOn)
+ return;
+
+ /*
+ * Collect a bitmap of all the output column numbers used by the upper
+ * query.
+ *
+ * Add all the attributes needed for joins or final output. Note: we must
+ * look at rel's targetlist, not the attr_needed data, because attr_needed
+ * isn't computed for inheritance child rels, cf set_append_rel_size().
+ * (XXX might be worth changing that sometime.)
+ */
+ pull_varattnos((Node *) rel->reltarget->exprs, rel->relid, &attrs_used);
+
+ /* Add all the attributes used by un-pushed-down restriction clauses. */
+ foreach(lc, rel->baserestrictinfo)
+ {
+ RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
+
+ pull_varattnos((Node *) rinfo->clause, rel->relid, &attrs_used);
+ }
+
+ /*
+ * If there's a whole-row reference to the subquery, we can't remove
+ * anything.
+ */
+ if (bms_is_member(0 - FirstLowInvalidHeapAttributeNumber, attrs_used))
+ return;
+
+ /*
+ * Run through the tlist and zap entries we don't need. It's okay to
+ * modify the tlist items in-place because set_subquery_pathlist made a
+ * copy of the subquery.
+ */
+ foreach(lc, subquery->targetList)
+ {
+ TargetEntry *tle = (TargetEntry *) lfirst(lc);
+ Node *texpr = (Node *) tle->expr;
+
+ /*
+ * If it has a sortgroupref number, it's used in some sort/group
+ * clause so we'd better not remove it. Also, don't remove any
+ * resjunk columns, since their reason for being has nothing to do
+ * with anybody reading the subquery's output. (It's likely that
+ * resjunk columns in a sub-SELECT would always have ressortgroupref
+ * set, but even if they don't, it seems imprudent to remove them.)
+ */
+ if (tle->ressortgroupref || tle->resjunk)
+ continue;
+
+ /*
+ * If it's used by the upper query, we can't remove it.
+ */
+ if (bms_is_member(tle->resno - FirstLowInvalidHeapAttributeNumber,
+ attrs_used))
+ continue;
+
+ /*
+ * If it contains a set-returning function, we can't remove it since
+ * that could change the number of rows returned by the subquery.
+ */
+ if (subquery->hasTargetSRFs &&
+ expression_returns_set(texpr))
+ continue;
+
+ /*
+ * If it contains volatile functions, we daren't remove it for fear
+ * that the user is expecting their side-effects to happen.
+ */
+ if (contain_volatile_functions(texpr))
+ continue;
+
+ /*
+ * OK, we don't need it. Replace the expression with a NULL constant.
+ * Preserve the exposed type of the expression, in case something
+ * looks at the rowtype of the subquery's result.
+ */
+ tle->expr = (Expr *) makeNullConst(exprType(texpr),
+ exprTypmod(texpr),
+ exprCollation(texpr));
+ }
+}
+
+/*
+ * create_partial_bitmap_paths
+ * Build partial bitmap heap path for the relation
+ */
+void
+create_partial_bitmap_paths(PlannerInfo *root, RelOptInfo *rel,
+ Path *bitmapqual)
+{
+ int parallel_workers;
+ double pages_fetched;
+
+ /* Compute heap pages for bitmap heap scan */
+ pages_fetched = compute_bitmap_pages(root, rel, bitmapqual, 1.0,
+ NULL, NULL);
+
+ parallel_workers = compute_parallel_worker(rel, pages_fetched, -1,
+ max_parallel_workers_per_gather);
+
+ if (parallel_workers <= 0)
+ return;
+
+ add_partial_path(rel, (Path *) create_bitmap_heap_path(root, rel,
+ bitmapqual, rel->lateral_relids, 1.0, parallel_workers));
+}
+
+/*
+ * Compute the number of parallel workers that should be used to scan a
+ * relation. We compute the parallel workers based on the size of the heap to
+ * be scanned and the size of the index to be scanned, then choose a minimum
+ * of those.
+ *
+ * "heap_pages" is the number of pages from the table that we expect to scan, or
+ * -1 if we don't expect to scan any.
+ *
+ * "index_pages" is the number of pages from the index that we expect to scan, or
+ * -1 if we don't expect to scan any.
+ *
+ * "max_workers" is caller's limit on the number of workers. This typically
+ * comes from a GUC.
+ */
+int
+compute_parallel_worker(RelOptInfo *rel, double heap_pages, double index_pages,
+ int max_workers)
+{
+ int parallel_workers = 0;
+
+ /*
+ * If the user has set the parallel_workers reloption, use that; otherwise
+ * select a default number of workers.
+ */
+ if (rel->rel_parallel_workers != -1)
+ parallel_workers = rel->rel_parallel_workers;
+ else
+ {
+ /*
+ * If the number of pages being scanned is insufficient to justify a
+ * parallel scan, just return zero ... unless it's an inheritance
+ * child. In that case, we want to generate a parallel path here
+ * anyway. It might not be worthwhile just for this relation, but
+ * when combined with all of its inheritance siblings it may well pay
+ * off.
+ */
+ if (rel->reloptkind == RELOPT_BASEREL &&
+ ((heap_pages >= 0 && heap_pages < min_parallel_table_scan_size) ||
+ (index_pages >= 0 && index_pages < min_parallel_index_scan_size)))
+ return 0;
+
+ if (heap_pages >= 0)
+ {
+ int heap_parallel_threshold;
+ int heap_parallel_workers = 1;
+
+ /*
+ * Select the number of workers based on the log of the size of
+ * the relation. This probably needs to be a good deal more
+ * sophisticated, but we need something here for now. Note that
+ * the upper limit of the min_parallel_table_scan_size GUC is
+ * chosen to prevent overflow here.
+ */
+ heap_parallel_threshold = Max(min_parallel_table_scan_size, 1);
+ while (heap_pages >= (BlockNumber) (heap_parallel_threshold * 3))
+ {
+ heap_parallel_workers++;
+ heap_parallel_threshold *= 3;
+ if (heap_parallel_threshold > INT_MAX / 3)
+ break; /* avoid overflow */
+ }
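+
+ /*
+ * The loop above adds one worker each time the table triples in size
+ * beyond the threshold; e.g. with the default
+ * min_parallel_table_scan_size of 8MB (1024 pages of 8kB), a table of
+ * at least 24MB gets 2 workers, 72MB gets 3, 216MB gets 4, and so on.
+ */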
+
+ parallel_workers = heap_parallel_workers;
+ }
+
+ if (index_pages >= 0)
+ {
+ int index_parallel_workers = 1;
+ int index_parallel_threshold;
+
+ /* same calculation as for heap_pages above */
+ index_parallel_threshold = Max(min_parallel_index_scan_size, 1);
+ while (index_pages >= (BlockNumber) (index_parallel_threshold * 3))
+ {
+ index_parallel_workers++;
+ index_parallel_threshold *= 3;
+ if (index_parallel_threshold > INT_MAX / 3)
+ break; /* avoid overflow */
+ }
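+
+ /*
+ * Likewise, e.g. with the default min_parallel_index_scan_size of
+ * 512kB (64 pages of 8kB), at least 192 index pages get 2 workers,
+ * 576 get 3, and so on.
+ */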
+
+ if (parallel_workers > 0)
+ parallel_workers = Min(parallel_workers, index_parallel_workers);
+ else
+ parallel_workers = index_parallel_workers;
+ }
+ }
+
+ /* In no case use more than caller supplied maximum number of workers */
+ parallel_workers = Min(parallel_workers, max_workers);
+
+ return parallel_workers;
+}
+
+/*
+ * generate_partitionwise_join_paths
+ * Create paths representing partitionwise join for given partitioned
+ * join relation.
+ *
+ * This must not be called until after we are done adding paths for all
+ * child-joins. Otherwise, add_path might delete a path to which some path
+ * generated here has a reference.
+ */
+void
+generate_partitionwise_join_paths(PlannerInfo *root, RelOptInfo *rel)
+{
+ List *live_children = NIL;
+ int cnt_parts;
+ int num_parts;
+ RelOptInfo **part_rels;
+
+ /* Handle only join relations here. */
+ if (!IS_JOIN_REL(rel))
+ return;
+
+ /* We've nothing to do if the relation is not partitioned. */
+ if (!IS_PARTITIONED_REL(rel))
+ return;
+
+ /* The relation should have consider_partitionwise_join set. */
+ Assert(rel->consider_partitionwise_join);
+
+ /* Guard against stack overflow due to overly deep partition hierarchy. */
+ check_stack_depth();
+
+ num_parts = rel->nparts;
+ part_rels = rel->part_rels;
+
+ /* Collect non-dummy child-joins. */
+ for (cnt_parts = 0; cnt_parts < num_parts; cnt_parts++)
+ {
+ RelOptInfo *child_rel = part_rels[cnt_parts];
+
+ /* If it's been pruned entirely, it's certainly dummy. */
+ if (child_rel == NULL)
+ continue;
+
+ /* Make partitionwise join paths for this partitioned child-join. */
+ generate_partitionwise_join_paths(root, child_rel);
+
+ /* If we failed to make any path for this child, we must give up. */
+ if (child_rel->pathlist == NIL)
+ {
+ /*
+ * Mark the parent joinrel as unpartitioned so that later
+ * functions treat it correctly.
+ */
+ rel->nparts = 0;
+ return;
+ }
+
+ /* Else, identify the cheapest path for it. */
+ set_cheapest(child_rel);
+
+ /* Dummy children need not be scanned, so ignore those. */
+ if (IS_DUMMY_REL(child_rel))
+ continue;
+
+#ifdef OPTIMIZER_DEBUG
+ debug_print_rel(root, child_rel);
+#endif
+
+ live_children = lappend(live_children, child_rel);
+ }
+
+ /* If all child-joins are dummy, parent join is also dummy. */
+ if (!live_children)
+ {
+ mark_dummy_rel(rel);
+ return;
+ }
+
+ /* Build additional paths for this rel from child-join paths. */
+ add_paths_to_append_rel(root, rel, live_children);
+ list_free(live_children);
+}
+
+
+/*****************************************************************************
+ * DEBUG SUPPORT
+ *****************************************************************************/
+
+#ifdef OPTIMIZER_DEBUG
+
+static void
+print_relids(PlannerInfo *root, Relids relids)
+{
+ int x;
+ bool first = true;
+
+ x = -1;
+ while ((x = bms_next_member(relids, x)) >= 0)
+ {
+ if (!first)
+ printf(" ");
+ if (x < root->simple_rel_array_size &&
+ root->simple_rte_array[x])
+ printf("%s", root->simple_rte_array[x]->eref->aliasname);
+ else
+ printf("%d", x);
+ first = false;
+ }
+}
+
+static void
+print_restrictclauses(PlannerInfo *root, List *clauses)
+{
+ ListCell *l;
+
+ foreach(l, clauses)
+ {
+ RestrictInfo *c = lfirst(l);
+
+ print_expr((Node *) c->clause, root->parse->rtable);
+ if (lnext(clauses, l))
+ printf(", ");
+ }
+}
+
+static void
+print_path(PlannerInfo *root, Path *path, int indent)
+{
+ const char *ptype;
+ bool join = false;
+ Path *subpath = NULL;
+ int i;
+
+ switch (nodeTag(path))
+ {
+ case T_Path:
+ switch (path->pathtype)
+ {
+ case T_SeqScan:
+ ptype = "SeqScan";
+ break;
+ case T_SampleScan:
+ ptype = "SampleScan";
+ break;
+ case T_FunctionScan:
+ ptype = "FunctionScan";
+ break;
+ case T_TableFuncScan:
+ ptype = "TableFuncScan";
+ break;
+ case T_ValuesScan:
+ ptype = "ValuesScan";
+ break;
+ case T_CteScan:
+ ptype = "CteScan";
+ break;
+ case T_NamedTuplestoreScan:
+ ptype = "NamedTuplestoreScan";
+ break;
+ case T_Result:
+ ptype = "Result";
+ break;
+ case T_WorkTableScan:
+ ptype = "WorkTableScan";
+ break;
+ default:
+ ptype = "???Path";
+ break;
+ }
+ break;
+ case T_IndexPath:
+ ptype = "IdxScan";
+ break;
+ case T_BitmapHeapPath:
+ ptype = "BitmapHeapScan";
+ break;
+ case T_BitmapAndPath:
+ ptype = "BitmapAndPath";
+ break;
+ case T_BitmapOrPath:
+ ptype = "BitmapOrPath";
+ break;
+ case T_TidPath:
+ ptype = "TidScan";
+ break;
+ case T_TidRangePath:
+ ptype = "TidRangePath";
+ break;
+ case T_SubqueryScanPath:
+ ptype = "SubqueryScan";
+ break;
+ case T_ForeignPath:
+ ptype = "ForeignScan";
+ break;
+ case T_CustomPath:
+ ptype = "CustomScan";
+ break;
+ case T_NestPath:
+ ptype = "NestLoop";
+ join = true;
+ break;
+ case T_MergePath:
+ ptype = "MergeJoin";
+ join = true;
+ break;
+ case T_HashPath:
+ ptype = "HashJoin";
+ join = true;
+ break;
+ case T_AppendPath:
+ ptype = "Append";
+ break;
+ case T_MergeAppendPath:
+ ptype = "MergeAppend";
+ break;
+ case T_GroupResultPath:
+ ptype = "GroupResult";
+ break;
+ case T_MaterialPath:
+ ptype = "Material";
+ subpath = ((MaterialPath *) path)->subpath;
+ break;
+ case T_MemoizePath:
+ ptype = "Memoize";
+ subpath = ((MemoizePath *) path)->subpath;
+ break;
+ case T_UniquePath:
+ ptype = "Unique";
+ subpath = ((UniquePath *) path)->subpath;
+ break;
+ case T_GatherPath:
+ ptype = "Gather";
+ subpath = ((GatherPath *) path)->subpath;
+ break;
+ case T_GatherMergePath:
+ ptype = "GatherMerge";
+ subpath = ((GatherMergePath *) path)->subpath;
+ break;
+ case T_ProjectionPath:
+ ptype = "Projection";
+ subpath = ((ProjectionPath *) path)->subpath;
+ break;
+ case T_ProjectSetPath:
+ ptype = "ProjectSet";
+ subpath = ((ProjectSetPath *) path)->subpath;
+ break;
+ case T_SortPath:
+ ptype = "Sort";
+ subpath = ((SortPath *) path)->subpath;
+ break;
+ case T_IncrementalSortPath:
+ ptype = "IncrementalSort";
+ subpath = ((SortPath *) path)->subpath;
+ break;
+ case T_GroupPath:
+ ptype = "Group";
+ subpath = ((GroupPath *) path)->subpath;
+ break;
+ case T_UpperUniquePath:
+ ptype = "UpperUnique";
+ subpath = ((UpperUniquePath *) path)->subpath;
+ break;
+ case T_AggPath:
+ ptype = "Agg";
+ subpath = ((AggPath *) path)->subpath;
+ break;
+ case T_GroupingSetsPath:
+ ptype = "GroupingSets";
+ subpath = ((GroupingSetsPath *) path)->subpath;
+ break;
+ case T_MinMaxAggPath:
+ ptype = "MinMaxAgg";
+ break;
+ case T_WindowAggPath:
+ ptype = "WindowAgg";
+ subpath = ((WindowAggPath *) path)->subpath;
+ break;
+ case T_SetOpPath:
+ ptype = "SetOp";
+ subpath = ((SetOpPath *) path)->subpath;
+ break;
+ case T_RecursiveUnionPath:
+ ptype = "RecursiveUnion";
+ break;
+ case T_LockRowsPath:
+ ptype = "LockRows";
+ subpath = ((LockRowsPath *) path)->subpath;
+ break;
+ case T_ModifyTablePath:
+ ptype = "ModifyTable";
+ break;
+ case T_LimitPath:
+ ptype = "Limit";
+ subpath = ((LimitPath *) path)->subpath;
+ break;
+ default:
+ ptype = "???Path";
+ break;
+ }
+
+ for (i = 0; i < indent; i++)
+ printf("\t");
+ printf("%s", ptype);
+
+ if (path->parent)
+ {
+ printf("(");
+ print_relids(root, path->parent->relids);
+ printf(")");
+ }
+ if (path->param_info)
+ {
+ printf(" required_outer (");
+ print_relids(root, path->param_info->ppi_req_outer);
+ printf(")");
+ }
+ printf(" rows=%.0f cost=%.2f..%.2f\n",
+ path->rows, path->startup_cost, path->total_cost);
+
+ if (path->pathkeys)
+ {
+ for (i = 0; i < indent; i++)
+ printf("\t");
+ printf(" pathkeys: ");
+ print_pathkeys(path->pathkeys, root->parse->rtable);
+ }
+
+ if (join)
+ {
+ JoinPath *jp = (JoinPath *) path;
+
+ for (i = 0; i < indent; i++)
+ printf("\t");
+ printf(" clauses: ");
+ print_restrictclauses(root, jp->joinrestrictinfo);
+ printf("\n");
+
+ if (IsA(path, MergePath))
+ {
+ MergePath *mp = (MergePath *) path;
+
+ for (i = 0; i < indent; i++)
+ printf("\t");
+ printf(" sortouter=%d sortinner=%d materializeinner=%d\n",
+ ((mp->outersortkeys) ? 1 : 0),
+ ((mp->innersortkeys) ? 1 : 0),
+ ((mp->materialize_inner) ? 1 : 0));
+ }
+
+ print_path(root, jp->outerjoinpath, indent + 1);
+ print_path(root, jp->innerjoinpath, indent + 1);
+ }
+
+ if (subpath)
+ print_path(root, subpath, indent + 1);
+}
+
+void
+debug_print_rel(PlannerInfo *root, RelOptInfo *rel)
+{
+ ListCell *l;
+
+ printf("RELOPTINFO (");
+ print_relids(root, rel->relids);
+ printf("): rows=%.0f width=%d\n", rel->rows, rel->reltarget->width);
+
+ if (rel->baserestrictinfo)
+ {
+ printf("\tbaserestrictinfo: ");
+ print_restrictclauses(root, rel->baserestrictinfo);
+ printf("\n");
+ }
+
+ if (rel->joininfo)
+ {
+ printf("\tjoininfo: ");
+ print_restrictclauses(root, rel->joininfo);
+ printf("\n");
+ }
+
+ printf("\tpath list:\n");
+ foreach(l, rel->pathlist)
+ print_path(root, lfirst(l), 1);
+ if (rel->cheapest_parameterized_paths)
+ {
+ printf("\n\tcheapest parameterized paths:\n");
+ foreach(l, rel->cheapest_parameterized_paths)
+ print_path(root, lfirst(l), 1);
+ }
+ if (rel->cheapest_startup_path)
+ {
+ printf("\n\tcheapest startup path:\n");
+ print_path(root, rel->cheapest_startup_path, 1);
+ }
+ if (rel->cheapest_total_path)
+ {
+ printf("\n\tcheapest total path:\n");
+ print_path(root, rel->cheapest_total_path, 1);
+ }
+ printf("\n");
+ fflush(stdout);
+}
+
+#endif /* OPTIMIZER_DEBUG */
diff --git a/src/backend/optimizer/path/clausesel.c b/src/backend/optimizer/path/clausesel.c
new file mode 100644
index 0000000..06f8363
--- /dev/null
+++ b/src/backend/optimizer/path/clausesel.c
@@ -0,0 +1,1000 @@
+/*-------------------------------------------------------------------------
+ *
+ * clausesel.c
+ * Routines to compute clause selectivities
+ *
+ * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ * src/backend/optimizer/path/clausesel.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "nodes/makefuncs.h"
+#include "nodes/nodeFuncs.h"
+#include "optimizer/clauses.h"
+#include "optimizer/cost.h"
+#include "optimizer/optimizer.h"
+#include "optimizer/pathnode.h"
+#include "optimizer/plancat.h"
+#include "statistics/statistics.h"
+#include "utils/fmgroids.h"
+#include "utils/lsyscache.h"
+#include "utils/selfuncs.h"
+
+/*
+ * Data structure for accumulating info about possible range-query
+ * clause pairs in clauselist_selectivity.
+ */
+typedef struct RangeQueryClause
+{
+ struct RangeQueryClause *next; /* next in linked list */
+ Node *var; /* The common variable of the clauses */
+ bool have_lobound; /* found a low-bound clause yet? */
+ bool have_hibound; /* found a high-bound clause yet? */
+ Selectivity lobound; /* Selectivity of a var > something clause */
+ Selectivity hibound; /* Selectivity of a var < something clause */
+} RangeQueryClause;
+
+static void addRangeClause(RangeQueryClause **rqlist, Node *clause,
+ bool varonleft, bool isLTsel, Selectivity s2);
+static RelOptInfo *find_single_rel_for_clauses(PlannerInfo *root,
+ List *clauses);
+static Selectivity clauselist_selectivity_or(PlannerInfo *root,
+ List *clauses,
+ int varRelid,
+ JoinType jointype,
+ SpecialJoinInfo *sjinfo,
+ bool use_extended_stats);
+
+/****************************************************************************
+ * ROUTINES TO COMPUTE SELECTIVITIES
+ ****************************************************************************/
+
+/*
+ * clauselist_selectivity -
+ * Compute the selectivity of an implicitly-ANDed list of boolean
+ * expression clauses. The list can be empty, in which case 1.0
+ * must be returned. List elements may be either RestrictInfos
+ * or bare expression clauses --- the former is preferred since
+ * it allows caching of results.
+ *
+ * See clause_selectivity() for the meaning of the additional parameters.
+ *
+ * The basic approach is to apply extended statistics first, on as many
+ * clauses as possible, in order to capture cross-column dependencies etc.
+ * The remaining clauses are then estimated by taking the product of their
+ * selectivities, but that's only right if they have independent
+ * probabilities, and in reality they are often NOT independent even if they
+ * only refer to a single column. So, we want to be smarter where we can.
+ *
+ * We also recognize "range queries", such as "x > 34 AND x < 42". Clauses
+ * are recognized as possible range query components if they are restriction
+ * opclauses whose operators have scalarltsel or a related function as their
+ * restriction selectivity estimator. We pair up clauses of this form that
+ * refer to the same variable. An unpairable clause of this kind is simply
+ * multiplied into the selectivity product in the normal way. But when we
+ * find a pair, we know that the selectivities represent the relative
+ * positions of the low and high bounds within the column's range, so instead
+ * of figuring the selectivity as hisel * losel, we can figure it as hisel +
+ * losel - 1. (To visualize this, see that hisel is the fraction of the range
+ * below the high bound, while losel is the fraction above the low bound; so
+ * hisel can be interpreted directly as a 0..1 value but we need to convert
+ * losel to 1-losel before interpreting it as a value. Then the available
+ * range is 1-losel to hisel. However, this calculation double-excludes
+ * nulls, so really we need hisel + losel + null_frac - 1.)
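+ *
+ * For example, with hypothetical numbers: suppose losel = 0.7 (fraction of
+ * the column above the low bound), hisel = 0.4 (fraction below the high
+ * bound), and null_frac = 0.05.  The pair is then estimated as
+ * hisel + losel + null_frac - 1 = 0.4 + 0.7 + 0.05 - 1 = 0.15, rather than
+ * the independence product hisel * losel = 0.28.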
+ *
+ * If either selectivity is exactly DEFAULT_INEQ_SEL, we forget this equation
+ * and instead use DEFAULT_RANGE_INEQ_SEL. The same applies if the equation
+ * yields an impossible (negative) result.
+ *
+ * A free side-effect is that we can recognize redundant inequalities such
+ * as "x < 4 AND x < 5"; only the tighter constraint will be counted.
+ *
+ * Of course this is all very dependent on the behavior of the inequality
+ * selectivity functions; perhaps some day we can generalize the approach.
+ */
+Selectivity
+clauselist_selectivity(PlannerInfo *root,
+ List *clauses,
+ int varRelid,
+ JoinType jointype,
+ SpecialJoinInfo *sjinfo)
+{
+ return clauselist_selectivity_ext(root, clauses, varRelid,
+ jointype, sjinfo, true);
+}
+
+/*
+ * clauselist_selectivity_ext -
+ * Extended version of clauselist_selectivity(). If "use_extended_stats"
+ * is false, all extended statistics will be ignored, and only per-column
+ * statistics will be used.
+ */
+Selectivity
+clauselist_selectivity_ext(PlannerInfo *root,
+ List *clauses,
+ int varRelid,
+ JoinType jointype,
+ SpecialJoinInfo *sjinfo,
+ bool use_extended_stats)
+{
+ Selectivity s1 = 1.0;
+ RelOptInfo *rel;
+ Bitmapset *estimatedclauses = NULL;
+ RangeQueryClause *rqlist = NULL;
+ ListCell *l;
+ int listidx;
+
+ /*
+ * If there's exactly one clause, just go directly to
+ * clause_selectivity_ext(). None of what we might do below is relevant.
+ */
+ if (list_length(clauses) == 1)
+ return clause_selectivity_ext(root, (Node *) linitial(clauses),
+ varRelid, jointype, sjinfo,
+ use_extended_stats);
+
+ /*
+ * Determine if these clauses reference a single relation. If so, and if
+ * it has extended statistics, try to apply those.
+ */
+ rel = find_single_rel_for_clauses(root, clauses);
+ if (use_extended_stats && rel && rel->rtekind == RTE_RELATION && rel->statlist != NIL)
+ {
+ /*
+ * Estimate as many clauses as possible using extended statistics.
+ *
+ * 'estimatedclauses' is populated with the 0-based list position
+ * index of clauses estimated here, and that should be ignored below.
+ */
+ s1 = statext_clauselist_selectivity(root, clauses, varRelid,
+ jointype, sjinfo, rel,
+ &estimatedclauses, false);
+ }
+
+ /*
+ * Apply normal selectivity estimates for remaining clauses. We'll be
+ * careful to skip any clauses which were already estimated above.
+ *
+ * Anything that doesn't look like a potential rangequery clause gets
+ * multiplied into s1 and forgotten. Anything that does gets inserted into
+ * an rqlist entry.
+ */
+ listidx = -1;
+ foreach(l, clauses)
+ {
+ Node *clause = (Node *) lfirst(l);
+ RestrictInfo *rinfo;
+ Selectivity s2;
+
+ listidx++;
+
+ /*
+ * Skip this clause if it's already been estimated by some other
+ * statistics above.
+ */
+ if (bms_is_member(listidx, estimatedclauses))
+ continue;
+
+ /* Compute the selectivity of this clause in isolation */
+ s2 = clause_selectivity_ext(root, clause, varRelid, jointype, sjinfo,
+ use_extended_stats);
+
+ /*
+ * Check for being passed a RestrictInfo.
+ *
+ * If it's a pseudoconstant RestrictInfo, then s2 is either 1.0 or
+ * 0.0; just use that rather than looking for range pairs.
+ */
+ if (IsA(clause, RestrictInfo))
+ {
+ rinfo = (RestrictInfo *) clause;
+ if (rinfo->pseudoconstant)
+ {
+ s1 = s1 * s2;
+ continue;
+ }
+ clause = (Node *) rinfo->clause;
+ }
+ else
+ rinfo = NULL;
+
+ /*
+ * See if it looks like a restriction clause with a pseudoconstant on
+ * one side. (Anything more complicated than that might not behave in
+ * the simple way we are expecting.) Most of the tests here can be
+ * done more efficiently with rinfo than without.
+ */
+ if (is_opclause(clause) && list_length(((OpExpr *) clause)->args) == 2)
+ {
+ OpExpr *expr = (OpExpr *) clause;
+ bool varonleft = true;
+ bool ok;
+
+ if (rinfo)
+ {
+ ok = (bms_membership(rinfo->clause_relids) == BMS_SINGLETON) &&
+ (is_pseudo_constant_clause_relids(lsecond(expr->args),
+ rinfo->right_relids) ||
+ (varonleft = false,
+ is_pseudo_constant_clause_relids(linitial(expr->args),
+ rinfo->left_relids)));
+ }
+ else
+ {
+ ok = (NumRelids(root, clause) == 1) &&
+ (is_pseudo_constant_clause(lsecond(expr->args)) ||
+ (varonleft = false,
+ is_pseudo_constant_clause(linitial(expr->args))));
+ }
+
+ if (ok)
+ {
+ /*
+ * If it's not a "<"/"<="/">"/">=" operator, just merge the
+ * selectivity in generically. But if it's the right oprrest,
+ * add the clause to rqlist for later processing.
+ */
+ switch (get_oprrest(expr->opno))
+ {
+ case F_SCALARLTSEL:
+ case F_SCALARLESEL:
+ addRangeClause(&rqlist, clause,
+ varonleft, true, s2);
+ break;
+ case F_SCALARGTSEL:
+ case F_SCALARGESEL:
+ addRangeClause(&rqlist, clause,
+ varonleft, false, s2);
+ break;
+ default:
+ /* Just merge the selectivity in generically */
+ s1 = s1 * s2;
+ break;
+ }
+ continue; /* drop to loop bottom */
+ }
+ }
+
+ /* Not the right form, so treat it generically. */
+ s1 = s1 * s2;
+ }
+
+ /*
+ * Now scan the rangequery pair list.
+ */
+ while (rqlist != NULL)
+ {
+ RangeQueryClause *rqnext;
+
+ if (rqlist->have_lobound && rqlist->have_hibound)
+ {
+ /* Successfully matched a pair of range clauses */
+ Selectivity s2;
+
+ /*
+ * Exact equality to the default value probably means the
+ * selectivity function punted. This is not airtight but should
+ * be good enough.
+ */
+ if (rqlist->hibound == DEFAULT_INEQ_SEL ||
+ rqlist->lobound == DEFAULT_INEQ_SEL)
+ {
+ s2 = DEFAULT_RANGE_INEQ_SEL;
+ }
+ else
+ {
+ s2 = rqlist->hibound + rqlist->lobound - 1.0;
+
+ /* Adjust for double-exclusion of NULLs */
+ s2 += nulltestsel(root, IS_NULL, rqlist->var,
+ varRelid, jointype, sjinfo);
+
+ /*
+ * A zero or slightly negative s2 should be converted into a
+ * small positive value; we probably are dealing with a very
+ * tight range and got a bogus result due to roundoff errors.
+ * However, if s2 is very negative, then we probably have
+ * default selectivity estimates on one or both sides of the
+ * range that we failed to recognize above for some reason.
+ */
+ if (s2 <= 0.0)
+ {
+ if (s2 < -0.01)
+ {
+ /*
+ * No data available --- use a default estimate that
+ * is small, but not real small.
+ */
+ s2 = DEFAULT_RANGE_INEQ_SEL;
+ }
+ else
+ {
+ /*
+ * It's just roundoff error; use a small positive
+ * value
+ */
+ s2 = 1.0e-10;
+ }
+ }
+ }
+ /* Merge in the selectivity of the pair of clauses */
+ s1 *= s2;
+ }
+ else
+ {
+ /* Only found one of a pair, merge it in generically */
+ if (rqlist->have_lobound)
+ s1 *= rqlist->lobound;
+ else
+ s1 *= rqlist->hibound;
+ }
+ /* release storage and advance */
+ rqnext = rqlist->next;
+ pfree(rqlist);
+ rqlist = rqnext;
+ }
+
+ return s1;
+}
+
+/*
+ * clauselist_selectivity_or -
+ * Compute the selectivity of an implicitly-ORed list of boolean
+ * expression clauses. The list can be empty, in which case 0.0
+ * must be returned. List elements may be either RestrictInfos
+ * or bare expression clauses --- the former is preferred since
+ * it allows caching of results.
+ *
+ * See clause_selectivity() for the meaning of the additional parameters.
+ *
+ * The basic approach is to apply extended statistics first, on as many
+ * clauses as possible, in order to capture cross-column dependencies etc.
+ * The remaining clauses are then estimated as if they were independent.
+ */
+static Selectivity
+clauselist_selectivity_or(PlannerInfo *root,
+ List *clauses,
+ int varRelid,
+ JoinType jointype,
+ SpecialJoinInfo *sjinfo,
+ bool use_extended_stats)
+{
+ Selectivity s1 = 0.0;
+ RelOptInfo *rel;
+ Bitmapset *estimatedclauses = NULL;
+ ListCell *lc;
+ int listidx;
+
+ /*
+ * Determine if these clauses reference a single relation. If so, and if
+ * it has extended statistics, try to apply those.
+ */
+ rel = find_single_rel_for_clauses(root, clauses);
+ if (use_extended_stats && rel && rel->rtekind == RTE_RELATION && rel->statlist != NIL)
+ {
+ /*
+ * Estimate as many clauses as possible using extended statistics.
+ *
+ * 'estimatedclauses' is populated with the 0-based list position
+ * index of clauses estimated here, and that should be ignored below.
+ */
+ s1 = statext_clauselist_selectivity(root, clauses, varRelid,
+ jointype, sjinfo, rel,
+ &estimatedclauses, true);
+ }
+
+ /*
+ * Estimate the remaining clauses as if they were independent.
+ *
+ * Selectivities for an OR clause are computed as s1+s2 - s1*s2 to account
+ * for the probable overlap of selected tuple sets.
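+ *
+ * For example, with hypothetical values s1 = 0.2 and s2 = 0.3, the running
+ * estimate becomes 0.2 + 0.3 - 0.2 * 0.3 = 0.44, the usual
+ * inclusion-exclusion result under an independence assumption.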
+ *
+ * XXX is this too conservative?
+ */
+ listidx = -1;
+ foreach(lc, clauses)
+ {
+ Selectivity s2;
+
+ listidx++;
+
+ /*
+ * Skip this clause if it's already been estimated by some other
+ * statistics above.
+ */
+ if (bms_is_member(listidx, estimatedclauses))
+ continue;
+
+ s2 = clause_selectivity_ext(root, (Node *) lfirst(lc), varRelid,
+ jointype, sjinfo, use_extended_stats);
+
+ s1 = s1 + s2 - s1 * s2;
+ }
+
+ return s1;
+}
+
+/*
+ * addRangeClause --- add a new range clause for clauselist_selectivity
+ *
+ * Here is where we try to match up pairs of range-query clauses
+ */
+static void
+addRangeClause(RangeQueryClause **rqlist, Node *clause,
+ bool varonleft, bool isLTsel, Selectivity s2)
+{
+ RangeQueryClause *rqelem;
+ Node *var;
+ bool is_lobound;
+
+ if (varonleft)
+ {
+ var = get_leftop((Expr *) clause);
+ is_lobound = !isLTsel; /* x < something is high bound */
+ }
+ else
+ {
+ var = get_rightop((Expr *) clause);
+ is_lobound = isLTsel; /* something < x is low bound */
+ }
+
+ for (rqelem = *rqlist; rqelem; rqelem = rqelem->next)
+ {
+ /*
+ * We use full equal() here because the "var" might be a function of
+ * one or more attributes of the same relation...
+ */
+ if (!equal(var, rqelem->var))
+ continue;
+ /* Found the right group to put this clause in */
+ if (is_lobound)
+ {
+ if (!rqelem->have_lobound)
+ {
+ rqelem->have_lobound = true;
+ rqelem->lobound = s2;
+ }
+ else
+ {
+
+ /*------
+ * We have found two similar clauses, such as
+ * x > y AND x >= z.
+ * Keep only the more restrictive one.
+ *------
+ */
+ if (rqelem->lobound > s2)
+ rqelem->lobound = s2;
+ }
+ }
+ else
+ {
+ if (!rqelem->have_hibound)
+ {
+ rqelem->have_hibound = true;
+ rqelem->hibound = s2;
+ }
+ else
+ {
+
+ /*------
+ * We have found two similar clauses, such as
+ * x < y AND x <= z.
+ * Keep only the more restrictive one.
+ *------
+ */
+ if (rqelem->hibound > s2)
+ rqelem->hibound = s2;
+ }
+ }
+ return;
+ }
+
+ /* No matching var found, so make a new clause-pair data structure */
+ rqelem = (RangeQueryClause *) palloc(sizeof(RangeQueryClause));
+ rqelem->var = var;
+ if (is_lobound)
+ {
+ rqelem->have_lobound = true;
+ rqelem->have_hibound = false;
+ rqelem->lobound = s2;
+ }
+ else
+ {
+ rqelem->have_lobound = false;
+ rqelem->have_hibound = true;
+ rqelem->hibound = s2;
+ }
+ rqelem->next = *rqlist;
+ *rqlist = rqelem;
+}
+
+/*
+ * find_single_rel_for_clauses
+ * Examine each clause in 'clauses' and determine if all clauses
+ * reference only a single relation. If so return that relation,
+ * otherwise return NULL.
+ */
+static RelOptInfo *
+find_single_rel_for_clauses(PlannerInfo *root, List *clauses)
+{
+ int lastrelid = 0;
+ ListCell *l;
+
+ foreach(l, clauses)
+ {
+ RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
+ int relid;
+
+ /*
+ * If we have a list of bare clauses rather than RestrictInfos, we
+ * could pull out their relids the hard way with pull_varnos().
+ * However, currently the extended-stats machinery won't do anything
+ * with non-RestrictInfo clauses anyway, so there's no point in
+ * spending extra cycles; just fail if that's what we have.
+ *
+ * An exception to that rule is if we have a bare BoolExpr AND clause.
+ * We treat this as a special case because the restrictinfo machinery
+ * doesn't build RestrictInfos on top of AND clauses.
+ */
+ if (is_andclause(rinfo))
+ {
+ RelOptInfo *rel;
+
+ rel = find_single_rel_for_clauses(root,
+ ((BoolExpr *) rinfo)->args);
+
+ if (rel == NULL)
+ return NULL;
+ if (lastrelid == 0)
+ lastrelid = rel->relid;
+ else if (rel->relid != lastrelid)
+ return NULL;
+
+ continue;
+ }
+
+ if (!IsA(rinfo, RestrictInfo))
+ return NULL;
+
+ if (bms_is_empty(rinfo->clause_relids))
+ continue; /* we can ignore variable-free clauses */
+ if (!bms_get_singleton_member(rinfo->clause_relids, &relid))
+ return NULL; /* multiple relations in this clause */
+ if (lastrelid == 0)
+ lastrelid = relid; /* first clause referencing a relation */
+ else if (relid != lastrelid)
+ return NULL; /* relation not same as last one */
+ }
+
+ if (lastrelid != 0)
+ return find_base_rel(root, lastrelid);
+
+ return NULL; /* no clauses */
+}
+
+/*
+ * bms_is_subset_singleton
+ *
+ * Same result as bms_is_subset(s, bms_make_singleton(x)),
+ * but a little faster and doesn't leak memory.
+ *
+ * Is this of use anywhere else? If so move to bitmapset.c ...
+ */
+static bool
+bms_is_subset_singleton(const Bitmapset *s, int x)
+{
+ switch (bms_membership(s))
+ {
+ case BMS_EMPTY_SET:
+ return true;
+ case BMS_SINGLETON:
+ return bms_is_member(x, s);
+ case BMS_MULTIPLE:
+ return false;
+ }
+ /* can't get here... */
+ return false;
+}
+
+/*
+ * treat_as_join_clause -
+ * Decide whether an operator clause is to be handled by the
+ * restriction or join estimator. Subroutine for clause_selectivity().
+ */
+static inline bool
+treat_as_join_clause(PlannerInfo *root, Node *clause, RestrictInfo *rinfo,
+ int varRelid, SpecialJoinInfo *sjinfo)
+{
+ if (varRelid != 0)
+ {
+ /*
+ * Caller is forcing restriction mode (eg, because we are examining an
+ * inner indexscan qual).
+ */
+ return false;
+ }
+ else if (sjinfo == NULL)
+ {
+ /*
+ * It must be a restriction clause, since it's being evaluated at a
+ * scan node.
+ */
+ return false;
+ }
+ else
+ {
+ /*
+ * Otherwise, it's a join if there's more than one relation used. We
+ * can optimize this calculation if an rinfo was passed.
+ *
+ * XXX Since we know the clause is being evaluated at a join, the
+ * only way it could be single-relation is if it was delayed by outer
+ * joins. Although we can make use of the restriction qual estimators
+ * anyway, it seems likely that we ought to account for the
+ * probability of injected nulls somehow.
+ */
+ if (rinfo)
+ return (bms_membership(rinfo->clause_relids) == BMS_MULTIPLE);
+ else
+ return (NumRelids(root, clause) > 1);
+ }
+}
+
+
+/*
+ * clause_selectivity -
+ * Compute the selectivity of a general boolean expression clause.
+ *
+ * The clause can be either a RestrictInfo or a plain expression. If it's
+ * a RestrictInfo, we try to cache the selectivity for possible re-use,
+ * so passing RestrictInfos is preferred.
+ *
+ * varRelid is either 0 or a rangetable index.
+ *
+ * When varRelid is not 0, only variables belonging to that relation are
+ * considered in computing selectivity; other vars are treated as constants
+ * of unknown values. This is appropriate for estimating the selectivity of
+ * a join clause that is being used as a restriction clause in a scan of a
+ * nestloop join's inner relation --- varRelid should then be the ID of the
+ * inner relation.
+ *
+ * When varRelid is 0, all variables are treated as variables. This
+ * is appropriate for ordinary join clauses and restriction clauses.
+ *
+ * jointype is the join type, if the clause is a join clause. Pass JOIN_INNER
+ * if the clause isn't a join clause.
+ *
+ * sjinfo is NULL for a non-join clause, otherwise it provides additional
+ * context information about the join being performed. There are some
+ * special cases:
+ * 1. For a special (not INNER) join, sjinfo is always a member of
+ * root->join_info_list.
+ * 2. For an INNER join, sjinfo is just a transient struct, and only the
+ * relids and jointype fields in it can be trusted.
+ * It is possible for jointype to be different from sjinfo->jointype.
+ * This indicates we are considering a variant join: either with
+ * the LHS and RHS switched, or with one input unique-ified.
+ *
+ * Note: when passing nonzero varRelid, it's normally appropriate to set
+ * jointype == JOIN_INNER, sjinfo == NULL, even if the clause is really a
+ * join clause; because we aren't treating it as a join clause.
+ */
+Selectivity
+clause_selectivity(PlannerInfo *root,
+ Node *clause,
+ int varRelid,
+ JoinType jointype,
+ SpecialJoinInfo *sjinfo)
+{
+ return clause_selectivity_ext(root, clause, varRelid,
+ jointype, sjinfo, true);
+}
+
+/*
+ * clause_selectivity_ext -
+ * Extended version of clause_selectivity(). If "use_extended_stats" is
+ * false, all extended statistics will be ignored, and only per-column
+ * statistics will be used.
+ */
+Selectivity
+clause_selectivity_ext(PlannerInfo *root,
+ Node *clause,
+ int varRelid,
+ JoinType jointype,
+ SpecialJoinInfo *sjinfo,
+ bool use_extended_stats)
+{
+ Selectivity s1 = 0.5; /* default for any unhandled clause type */
+ RestrictInfo *rinfo = NULL;
+ bool cacheable = false;
+
+ if (clause == NULL) /* can this still happen? */
+ return s1;
+
+ if (IsA(clause, RestrictInfo))
+ {
+ rinfo = (RestrictInfo *) clause;
+
+ /*
+ * If the clause is marked pseudoconstant, then it will be used as a
+ * gating qual and should not affect selectivity estimates; hence
+ * return 1.0. The only exception is that a constant FALSE may be
+ * taken as having selectivity 0.0, since it will surely mean no rows
+ * out of the plan. This case is simple enough that we need not
+ * bother caching the result.
+ */
+ if (rinfo->pseudoconstant)
+ {
+ if (!IsA(rinfo->clause, Const))
+ return (Selectivity) 1.0;
+ }
+
+ /*
+ * If the clause is marked redundant, always return 1.0.
+ */
+ if (rinfo->norm_selec > 1)
+ return (Selectivity) 1.0;
+
+ /*
+ * If possible, cache the result of the selectivity calculation for
+ * the clause. We can cache if varRelid is zero or the clause
+ * contains only vars of that relid --- otherwise varRelid will affect
+ * the result, so mustn't cache. Outer join quals might be examined
+ * with either their join's actual jointype or JOIN_INNER, so we need
+ * two cache variables to remember both cases. Note: we assume the
+ * result won't change if we are switching the input relations or
+ * considering a unique-ified case, so we only need one cache variable
+ * for all non-JOIN_INNER cases.
+ */
+ if (varRelid == 0 ||
+ bms_is_subset_singleton(rinfo->clause_relids, varRelid))
+ {
+ /* Cacheable --- do we already have the result? */
+ if (jointype == JOIN_INNER)
+ {
+ if (rinfo->norm_selec >= 0)
+ return rinfo->norm_selec;
+ }
+ else
+ {
+ if (rinfo->outer_selec >= 0)
+ return rinfo->outer_selec;
+ }
+ cacheable = true;
+ }
+
+ /*
+ * Proceed with examination of contained clause. If the clause is an
+ * OR-clause, we want to look at the variant with sub-RestrictInfos,
+ * so that per-subclause selectivities can be cached.
+ */
+ if (rinfo->orclause)
+ clause = (Node *) rinfo->orclause;
+ else
+ clause = (Node *) rinfo->clause;
+ }
+
+ if (IsA(clause, Var))
+ {
+ Var *var = (Var *) clause;
+
+ /*
+ * We probably shouldn't ever see an uplevel Var here, but if we do,
+ * return the default selectivity...
+ */
+ if (var->varlevelsup == 0 &&
+ (varRelid == 0 || varRelid == (int) var->varno))
+ {
+ /* Use the restriction selectivity function for a bool Var */
+ s1 = boolvarsel(root, (Node *) var, varRelid);
+ }
+ }
+ else if (IsA(clause, Const))
+ {
+ /* bool constant is pretty easy... */
+ Const *con = (Const *) clause;
+
+ s1 = con->constisnull ? 0.0 :
+ DatumGetBool(con->constvalue) ? 1.0 : 0.0;
+ }
+ else if (IsA(clause, Param))
+ {
+ /* see if we can replace the Param */
+ Node *subst = estimate_expression_value(root, clause);
+
+ if (IsA(subst, Const))
+ {
+ /* bool constant is pretty easy... */
+ Const *con = (Const *) subst;
+
+ s1 = con->constisnull ? 0.0 :
+ DatumGetBool(con->constvalue) ? 1.0 : 0.0;
+ }
+ else
+ {
+ /* XXX any way to do better than default? */
+ }
+ }
+ else if (is_notclause(clause))
+ {
+ /* inverse of the selectivity of the underlying clause */
+ s1 = 1.0 - clause_selectivity_ext(root,
+ (Node *) get_notclausearg((Expr *) clause),
+ varRelid,
+ jointype,
+ sjinfo,
+ use_extended_stats);
+ }
+ else if (is_andclause(clause))
+ {
+ /* share code with clauselist_selectivity() */
+ s1 = clauselist_selectivity_ext(root,
+ ((BoolExpr *) clause)->args,
+ varRelid,
+ jointype,
+ sjinfo,
+ use_extended_stats);
+ }
+ else if (is_orclause(clause))
+ {
+ /*
+ * Almost the same thing as clauselist_selectivity, but with the
+ * clauses connected by OR.
+ */
+ s1 = clauselist_selectivity_or(root,
+ ((BoolExpr *) clause)->args,
+ varRelid,
+ jointype,
+ sjinfo,
+ use_extended_stats);
+ }
+ else if (is_opclause(clause) || IsA(clause, DistinctExpr))
+ {
+ OpExpr *opclause = (OpExpr *) clause;
+ Oid opno = opclause->opno;
+
+ if (treat_as_join_clause(root, clause, rinfo, varRelid, sjinfo))
+ {
+ /* Estimate selectivity for a join clause. */
+ s1 = join_selectivity(root, opno,
+ opclause->args,
+ opclause->inputcollid,
+ jointype,
+ sjinfo);
+ }
+ else
+ {
+ /* Estimate selectivity for a restriction clause. */
+ s1 = restriction_selectivity(root, opno,
+ opclause->args,
+ opclause->inputcollid,
+ varRelid);
+ }
+
+ /*
+ * DistinctExpr has the same representation as OpExpr, but the
+ * contained operator is "=" not "<>", so we must negate the result.
+ * This estimation method doesn't give the right behavior for nulls,
+ * but it's better than doing nothing.
+ */
+ if (IsA(clause, DistinctExpr))
+ s1 = 1.0 - s1;
+ }
+ else if (is_funcclause(clause))
+ {
+ FuncExpr *funcclause = (FuncExpr *) clause;
+
+ /* Try to get an estimate from the support function, if any */
+ s1 = function_selectivity(root,
+ funcclause->funcid,
+ funcclause->args,
+ funcclause->inputcollid,
+ treat_as_join_clause(root, clause, rinfo,
+ varRelid, sjinfo),
+ varRelid,
+ jointype,
+ sjinfo);
+ }
+ else if (IsA(clause, ScalarArrayOpExpr))
+ {
+ /* Use node specific selectivity calculation function */
+ s1 = scalararraysel(root,
+ (ScalarArrayOpExpr *) clause,
+ treat_as_join_clause(root, clause, rinfo,
+ varRelid, sjinfo),
+ varRelid,
+ jointype,
+ sjinfo);
+ }
+ else if (IsA(clause, RowCompareExpr))
+ {
+ /* Use node specific selectivity calculation function */
+ s1 = rowcomparesel(root,
+ (RowCompareExpr *) clause,
+ varRelid,
+ jointype,
+ sjinfo);
+ }
+ else if (IsA(clause, NullTest))
+ {
+ /* Use node specific selectivity calculation function */
+ s1 = nulltestsel(root,
+ ((NullTest *) clause)->nulltesttype,
+ (Node *) ((NullTest *) clause)->arg,
+ varRelid,
+ jointype,
+ sjinfo);
+ }
+ else if (IsA(clause, BooleanTest))
+ {
+ /* Use node specific selectivity calculation function */
+ s1 = booltestsel(root,
+ ((BooleanTest *) clause)->booltesttype,
+ (Node *) ((BooleanTest *) clause)->arg,
+ varRelid,
+ jointype,
+ sjinfo);
+ }
+ else if (IsA(clause, CurrentOfExpr))
+ {
+ /* CURRENT OF selects at most one row of its table */
+ CurrentOfExpr *cexpr = (CurrentOfExpr *) clause;
+ RelOptInfo *crel = find_base_rel(root, cexpr->cvarno);
+
+ if (crel->tuples > 0)
+ s1 = 1.0 / crel->tuples;
+ }
+ else if (IsA(clause, RelabelType))
+ {
+ /* Not sure this case is needed, but it can't hurt */
+ s1 = clause_selectivity_ext(root,
+ (Node *) ((RelabelType *) clause)->arg,
+ varRelid,
+ jointype,
+ sjinfo,
+ use_extended_stats);
+ }
+ else if (IsA(clause, CoerceToDomain))
+ {
+ /* Not sure this case is needed, but it can't hurt */
+ s1 = clause_selectivity_ext(root,
+ (Node *) ((CoerceToDomain *) clause)->arg,
+ varRelid,
+ jointype,
+ sjinfo,
+ use_extended_stats);
+ }
+ else
+ {
+ /*
+ * For anything else, see if we can consider it as a boolean variable.
+ * This only works if it's an immutable expression in Vars of a single
+ * relation; but there's no point in us checking that here because
+ * boolvarsel() will do it internally, and return a suitable default
+ * selectivity if not.
+ */
+ s1 = boolvarsel(root, clause, varRelid);
+ }
+
+ /* Cache the result if possible */
+ if (cacheable)
+ {
+ if (jointype == JOIN_INNER)
+ rinfo->norm_selec = s1;
+ else
+ rinfo->outer_selec = s1;
+ }
+
+#ifdef SELECTIVITY_DEBUG
+ elog(DEBUG4, "clause_selectivity: s1 %f", s1);
+#endif /* SELECTIVITY_DEBUG */
+
+ return s1;
+}
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
new file mode 100644
index 0000000..0ba26b2
--- /dev/null
+++ b/src/backend/optimizer/path/costsize.c
@@ -0,0 +1,6221 @@
+/*-------------------------------------------------------------------------
+ *
+ * costsize.c
+ * Routines to compute (and set) relation sizes and path costs
+ *
+ * Path costs are measured in arbitrary units established by these basic
+ * parameters:
+ *
+ * seq_page_cost Cost of a sequential page fetch
+ * random_page_cost Cost of a non-sequential page fetch
+ * cpu_tuple_cost Cost of typical CPU time to process a tuple
+ * cpu_index_tuple_cost Cost of typical CPU time to process an index tuple
+ * cpu_operator_cost Cost of CPU time to execute an operator or function
+ * parallel_tuple_cost Cost of CPU time to pass a tuple from worker to leader backend
+ * parallel_setup_cost Cost of setting up shared memory for parallelism
+ *
+ * We expect that the kernel will typically do some amount of read-ahead
+ * optimization; this in conjunction with seek costs means that seq_page_cost
+ * is normally considerably less than random_page_cost. (However, if the
+ * database is fully cached in RAM, it is reasonable to set them equal.)
+ *
+ * We also use a rough estimate "effective_cache_size" of the number of
+ * disk pages in Postgres + OS-level disk cache. (We can't simply use
+ * NBuffers for this purpose because that would ignore the effects of
+ * the kernel's disk cache.)
+ *
+ * Obviously, taking constants for these values is an oversimplification,
+ * but it's tough enough to get any useful estimates even at this level of
+ * detail. Note that all of these parameters are user-settable, in case
+ * the default values are drastically off for a particular platform.
+ *
+ * seq_page_cost and random_page_cost can also be overridden for an individual
+ * tablespace, in case some data is on a fast disk and other data is on a slow
+ * disk. Per-tablespace overrides never apply to temporary work files such as
+ * an external sort or a materialize node that overflows work_mem.
+ *
+ * We compute two separate costs for each path:
+ * total_cost: total estimated cost to fetch all tuples
+ * startup_cost: cost that is expended before first tuple is fetched
+ * In some scenarios, such as when there is a LIMIT or we are implementing
+ * an EXISTS(...) sub-select, it is not necessary to fetch all tuples of the
+ * path's result. A caller can estimate the cost of fetching a partial
+ * result by interpolating between startup_cost and total_cost. In detail:
+ * actual_cost = startup_cost +
+ * (total_cost - startup_cost) * tuples_to_fetch / path->rows;
+ * Note that a base relation's rows count (and, by extension, plan_rows for
+ * plan nodes below the LIMIT node) are set without regard to any LIMIT, so
+ * that this equation works properly. (Note: while path->rows is never zero
+ * for ordinary relations, it is zero for paths for provably-empty relations,
+ * so beware of division-by-zero.) The LIMIT is applied as a top-level
+ * plan node.
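+ *
+ * For example (hypothetical numbers): with startup_cost = 10, total_cost =
+ * 110 and path->rows = 1000, fetching only the first 100 tuples is costed
+ * at about 10 + (110 - 10) * 100 / 1000 = 20.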
+ *
+ * For largely historical reasons, most of the routines in this module use
+ * the passed result Path only to store their results (rows, startup_cost and
+ * total_cost) into. All the input data they need is passed as separate
+ * parameters, even though much of it could be extracted from the Path.
+ * An exception is made for the cost_XXXjoin() routines, which expect all
+ * the other fields of the passed XXXPath to be filled in, and similarly
+ * cost_index() assumes the passed IndexPath is valid except for its output
+ * values.
+ *
+ *
+ * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * IDENTIFICATION
+ * src/backend/optimizer/path/costsize.c
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include <limits.h>
+#include <math.h>
+
+#include "access/amapi.h"
+#include "access/htup_details.h"
+#include "access/tsmapi.h"
+#include "executor/executor.h"
+#include "executor/nodeAgg.h"
+#include "executor/nodeHash.h"
+#include "executor/nodeMemoize.h"
+#include "miscadmin.h"
+#include "nodes/makefuncs.h"
+#include "nodes/nodeFuncs.h"
+#include "optimizer/clauses.h"
+#include "optimizer/cost.h"
+#include "optimizer/optimizer.h"
+#include "optimizer/pathnode.h"
+#include "optimizer/paths.h"
+#include "optimizer/placeholder.h"
+#include "optimizer/plancat.h"
+#include "optimizer/planmain.h"
+#include "optimizer/restrictinfo.h"
+#include "parser/parsetree.h"
+#include "utils/lsyscache.h"
+#include "utils/selfuncs.h"
+#include "utils/spccache.h"
+#include "utils/tuplesort.h"
+
+
+#define LOG2(x) (log(x) / 0.693147180559945)
+
+/*
+ * Append and MergeAppend nodes are less expensive than some other operations
+ * which use cpu_tuple_cost; instead of adding a separate GUC, estimate the
+ * per-tuple cost as cpu_tuple_cost multiplied by this value.
+ */
+#define APPEND_CPU_COST_MULTIPLIER 0.5
+
+/*
+ * Maximum value for row estimates. We cap row estimates to this to help
+ * ensure that costs based on these estimates remain within the range of what
+ * double can represent. add_path() wouldn't act sanely given infinite or NaN
+ * cost values.
+ */
+#define MAXIMUM_ROWCOUNT 1e100
+
+double seq_page_cost = DEFAULT_SEQ_PAGE_COST;
+double random_page_cost = DEFAULT_RANDOM_PAGE_COST;
+double cpu_tuple_cost = DEFAULT_CPU_TUPLE_COST;
+double cpu_index_tuple_cost = DEFAULT_CPU_INDEX_TUPLE_COST;
+double cpu_operator_cost = DEFAULT_CPU_OPERATOR_COST;
+double parallel_tuple_cost = DEFAULT_PARALLEL_TUPLE_COST;
+double parallel_setup_cost = DEFAULT_PARALLEL_SETUP_COST;
+double recursive_worktable_factor = DEFAULT_RECURSIVE_WORKTABLE_FACTOR;
+
+int effective_cache_size = DEFAULT_EFFECTIVE_CACHE_SIZE;
+
+Cost disable_cost = 1.0e10;
+
+int max_parallel_workers_per_gather = 2;
+
+bool enable_seqscan = true;
+bool enable_indexscan = true;
+bool enable_indexonlyscan = true;
+bool enable_bitmapscan = true;
+bool enable_tidscan = true;
+bool enable_sort = true;
+bool enable_incremental_sort = true;
+bool enable_hashagg = true;
+bool enable_nestloop = true;
+bool enable_material = true;
+bool enable_memoize = true;
+bool enable_mergejoin = true;
+bool enable_hashjoin = true;
+bool enable_gathermerge = true;
+bool enable_partitionwise_join = false;
+bool enable_partitionwise_aggregate = false;
+bool enable_parallel_append = true;
+bool enable_parallel_hash = true;
+bool enable_partition_pruning = true;
+bool enable_async_append = true;
+
+typedef struct
+{
+ PlannerInfo *root;
+ QualCost total;
+} cost_qual_eval_context;
+
+static List *extract_nonindex_conditions(List *qual_clauses, List *indexclauses);
+static MergeScanSelCache *cached_scansel(PlannerInfo *root,
+ RestrictInfo *rinfo,
+ PathKey *pathkey);
+static void cost_rescan(PlannerInfo *root, Path *path,
+ Cost *rescan_startup_cost, Cost *rescan_total_cost);
+static bool cost_qual_eval_walker(Node *node, cost_qual_eval_context *context);
+static void get_restriction_qual_cost(PlannerInfo *root, RelOptInfo *baserel,
+ ParamPathInfo *param_info,
+ QualCost *qpqual_cost);
+static bool has_indexed_join_quals(NestPath *path);
+static double approx_tuple_count(PlannerInfo *root, JoinPath *path,
+ List *quals);
+static double calc_joinrel_size_estimate(PlannerInfo *root,
+ RelOptInfo *joinrel,
+ RelOptInfo *outer_rel,
+ RelOptInfo *inner_rel,
+ double outer_rows,
+ double inner_rows,
+ SpecialJoinInfo *sjinfo,
+ List *restrictlist);
+static Selectivity get_foreign_key_join_selectivity(PlannerInfo *root,
+ Relids outer_relids,
+ Relids inner_relids,
+ SpecialJoinInfo *sjinfo,
+ List **restrictlist);
+static Cost append_nonpartial_cost(List *subpaths, int numpaths,
+ int parallel_workers);
+static void set_rel_width(PlannerInfo *root, RelOptInfo *rel);
+static double relation_byte_size(double tuples, int width);
+static double page_size(double tuples, int width);
+static double get_parallel_divisor(Path *path);
+
+
+/*
+ * clamp_row_est
+ * Force a row-count estimate to a sane value.
+ */
+double
+clamp_row_est(double nrows)
+{
+ /*
+ * Avoid infinite and NaN row estimates. Costs derived from such values
+ * are going to be useless. Also force the estimate to be at least one
+ * row, to make explain output look better and to avoid possible
+ * divide-by-zero when interpolating costs. Make it an integer, too.
+ */
+ if (nrows > MAXIMUM_ROWCOUNT || isnan(nrows))
+ nrows = MAXIMUM_ROWCOUNT;
+ else if (nrows <= 1.0)
+ nrows = 1.0;
+ else
+ nrows = rint(nrows);
+
+ return nrows;
+}
+
+/*
+ * clamp_cardinality_to_long
+ * Cast a Cardinality value to a sane long value.
+ */
+long
+clamp_cardinality_to_long(Cardinality x)
+{
+ /*
+ * Just for paranoia's sake, ensure we do something sane with negative or
+ * NaN values.
+ */
+ if (isnan(x))
+ return LONG_MAX;
+ if (x <= 0)
+ return 0;
+
+ /*
+ * If "long" is 64 bits, then LONG_MAX cannot be represented exactly as a
+ * double. Casting it to double and back may well result in overflow due
+ * to rounding, so avoid doing that. We trust that any double value that
+ * compares strictly less than "(double) LONG_MAX" will cast to a
+ * representable "long" value.
+ */
+ return (x < (double) LONG_MAX) ? (long) x : LONG_MAX;
+}
+
+
+/*
+ * cost_seqscan
+ * Determines and returns the cost of scanning a relation sequentially.
+ *
+ * 'baserel' is the relation to be scanned
+ * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
+ */
+void
+cost_seqscan(Path *path, PlannerInfo *root,
+ RelOptInfo *baserel, ParamPathInfo *param_info)
+{
+ Cost startup_cost = 0;
+ Cost cpu_run_cost;
+ Cost disk_run_cost;
+ double spc_seq_page_cost;
+ QualCost qpqual_cost;
+ Cost cpu_per_tuple;
+
+ /* Should only be applied to base relations */
+ Assert(baserel->relid > 0);
+ Assert(baserel->rtekind == RTE_RELATION);
+
+ /* Mark the path with the correct row estimate */
+ if (param_info)
+ path->rows = param_info->ppi_rows;
+ else
+ path->rows = baserel->rows;
+
+ if (!enable_seqscan)
+ startup_cost += disable_cost;
+
+ /* fetch estimated page cost for tablespace containing table */
+ get_tablespace_page_costs(baserel->reltablespace,
+ NULL,
+ &spc_seq_page_cost);
+
+ /*
+ * disk costs
+ */
+ disk_run_cost = spc_seq_page_cost * baserel->pages;
+
+ /* CPU costs */
+ get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
+
+ startup_cost += qpqual_cost.startup;
+ cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
+ cpu_run_cost = cpu_per_tuple * baserel->tuples;
+ /* tlist eval costs are paid per output row, not per tuple scanned */
+ startup_cost += path->pathtarget->cost.startup;
+ cpu_run_cost += path->pathtarget->cost.per_tuple * path->rows;
+
+ /* Adjust costing for parallelism, if used. */
+ if (path->parallel_workers > 0)
+ {
+ double parallel_divisor = get_parallel_divisor(path);
+
+ /* The CPU cost is divided among all the workers. */
+ cpu_run_cost /= parallel_divisor;
+
+ /*
+ * It may be possible to amortize some of the I/O cost, but probably
+ * not very much, because most operating systems already do aggressive
+ * prefetching. For now, we assume that the disk run cost can't be
+ * amortized at all.
+ */
+
+ /*
+ * In the case of a parallel plan, the row count needs to represent
+ * the number of tuples processed per worker.
+ */
+ path->rows = clamp_row_est(path->rows / parallel_divisor);
+ }
+
+ path->startup_cost = startup_cost;
+ path->total_cost = startup_cost + cpu_run_cost + disk_run_cost;
+}
+
+/*
+ * cost_samplescan
+ * Determines and returns the cost of scanning a relation using sampling.
+ *
+ * 'baserel' is the relation to be scanned
+ * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
+ */
+void
+cost_samplescan(Path *path, PlannerInfo *root,
+ RelOptInfo *baserel, ParamPathInfo *param_info)
+{
+ Cost startup_cost = 0;
+ Cost run_cost = 0;
+ RangeTblEntry *rte;
+ TableSampleClause *tsc;
+ TsmRoutine *tsm;
+ double spc_seq_page_cost,
+ spc_random_page_cost,
+ spc_page_cost;
+ QualCost qpqual_cost;
+ Cost cpu_per_tuple;
+
+ /* Should only be applied to base relations with tablesample clauses */
+ Assert(baserel->relid > 0);
+ rte = planner_rt_fetch(baserel->relid, root);
+ Assert(rte->rtekind == RTE_RELATION);
+ tsc = rte->tablesample;
+ Assert(tsc != NULL);
+ tsm = GetTsmRoutine(tsc->tsmhandler);
+
+ /* Mark the path with the correct row estimate */
+ if (param_info)
+ path->rows = param_info->ppi_rows;
+ else
+ path->rows = baserel->rows;
+
+ /* fetch estimated page cost for tablespace containing table */
+ get_tablespace_page_costs(baserel->reltablespace,
+ &spc_random_page_cost,
+ &spc_seq_page_cost);
+
+ /* if NextSampleBlock is used, assume random access, else sequential */
+ spc_page_cost = (tsm->NextSampleBlock != NULL) ?
+ spc_random_page_cost : spc_seq_page_cost;
+
+ /*
+ * disk costs (recall that baserel->pages has already been set to the
+ * number of pages the sampling method will visit)
+ */
+ run_cost += spc_page_cost * baserel->pages;
+
+ /*
+ * CPU costs (recall that baserel->tuples has already been set to the
+ * number of tuples the sampling method will select). Note that we ignore
+ * execution cost of the TABLESAMPLE parameter expressions; they will be
+ * evaluated only once per scan, and in most usages they'll likely be
+ * simple constants anyway. We also don't charge anything for the
+ * calculations the sampling method might do internally.
+ */
+ get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
+
+ startup_cost += qpqual_cost.startup;
+ cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
+ run_cost += cpu_per_tuple * baserel->tuples;
+ /* tlist eval costs are paid per output row, not per tuple scanned */
+ startup_cost += path->pathtarget->cost.startup;
+ run_cost += path->pathtarget->cost.per_tuple * path->rows;
+
+ path->startup_cost = startup_cost;
+ path->total_cost = startup_cost + run_cost;
+}
+
+/*
+ * cost_gather
+ * Determines and returns the cost of gather path.
+ *
+ * 'rel' is the relation to be operated upon
+ * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
+ * 'rows' may be used to point to a row estimate; if non-NULL, it overrides
+ * both 'rel' and 'param_info'. This is useful when the path doesn't exactly
+ * correspond to any particular RelOptInfo.
+ */
+void
+cost_gather(GatherPath *path, PlannerInfo *root,
+ RelOptInfo *rel, ParamPathInfo *param_info,
+ double *rows)
+{
+ Cost startup_cost = 0;
+ Cost run_cost = 0;
+
+ /* Mark the path with the correct row estimate */
+ if (rows)
+ path->path.rows = *rows;
+ else if (param_info)
+ path->path.rows = param_info->ppi_rows;
+ else
+ path->path.rows = rel->rows;
+
+ startup_cost = path->subpath->startup_cost;
+
+ run_cost = path->subpath->total_cost - path->subpath->startup_cost;
+
+ /* Parallel setup and communication cost. */
+ startup_cost += parallel_setup_cost;
+ run_cost += parallel_tuple_cost * path->path.rows;
+
+ path->path.startup_cost = startup_cost;
+ path->path.total_cost = (startup_cost + run_cost);
+}
+
+/*
+ * cost_gather_merge
+ * Determines and returns the cost of gather merge path.
+ *
+ * GatherMerge merges several pre-sorted input streams, using a heap that at
+ * any given instant holds the next tuple from each stream. If there are N
+ * streams, we need about N*log2(N) tuple comparisons to construct the heap at
+ * startup, and then for each output tuple, about log2(N) comparisons to
+ * replace the top heap entry with the next tuple from the same stream.
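+ *
+ * For example, with 3 workers plus the leader (N = 4), building the heap
+ * costs about 4 * log2(4) = 8 comparisons, and each output tuple costs
+ * about log2(4) = 2 comparisons to replace the heap top.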
+ */
+void
+cost_gather_merge(GatherMergePath *path, PlannerInfo *root,
+ RelOptInfo *rel, ParamPathInfo *param_info,
+ Cost input_startup_cost, Cost input_total_cost,
+ double *rows)
+{
+ Cost startup_cost = 0;
+ Cost run_cost = 0;
+ Cost comparison_cost;
+ double N;
+ double logN;
+
+ /* Mark the path with the correct row estimate */
+ if (rows)
+ path->path.rows = *rows;
+ else if (param_info)
+ path->path.rows = param_info->ppi_rows;
+ else
+ path->path.rows = rel->rows;
+
+ if (!enable_gathermerge)
+ startup_cost += disable_cost;
+
+ /*
+ * Add one to the number of workers to account for the leader. This might
+ * be overgenerous since the leader will do less work than other workers
+ * in typical cases, but we'll go with it for now.
+ */
+ Assert(path->num_workers > 0);
+ N = (double) path->num_workers + 1;
+ logN = LOG2(N);
+
+ /* Assumed cost per tuple comparison */
+ comparison_cost = 2.0 * cpu_operator_cost;
+
+ /* Heap creation cost */
+ startup_cost += comparison_cost * N * logN;
+
+ /* Per-tuple heap maintenance cost */
+ run_cost += path->path.rows * comparison_cost * logN;
+
+ /* small cost for heap management, like cost_merge_append */
+ run_cost += cpu_operator_cost * path->path.rows;
+
+ /*
+ * Parallel setup and communication cost. Since Gather Merge, unlike
+ * Gather, requires us to block until a tuple is available from every
+ * worker, we bump the IPC cost up a little bit as compared with Gather.
+ * For lack of a better idea, charge an extra 5%.
+ */
+ startup_cost += parallel_setup_cost;
+ run_cost += parallel_tuple_cost * path->path.rows * 1.05;
+
+ path->path.startup_cost = startup_cost + input_startup_cost;
+ path->path.total_cost = (startup_cost + run_cost + input_total_cost);
+}
+
+/*
+ * cost_index
+ * Determines and returns the cost of scanning a relation using an index.
+ *
+ * 'path' describes the indexscan under consideration, and is complete
+ * except for the fields to be set by this routine
+ * 'loop_count' is the number of repetitions of the indexscan to factor into
+ * estimates of caching behavior
+ *
+ * In addition to rows, startup_cost and total_cost, cost_index() sets the
+ * path's indextotalcost and indexselectivity fields. These values will be
+ * needed if the IndexPath is used in a BitmapIndexScan.
+ *
+ * NOTE: path->indexquals must contain only clauses usable as index
+ * restrictions. Any additional quals evaluated as qpquals may reduce the
+ * number of returned tuples, but they won't reduce the number of tuples
+ * we have to fetch from the table, so they don't reduce the scan cost.
+ */
+void
+cost_index(IndexPath *path, PlannerInfo *root, double loop_count,
+ bool partial_path)
+{
+ IndexOptInfo *index = path->indexinfo;
+ RelOptInfo *baserel = index->rel;
+ bool indexonly = (path->path.pathtype == T_IndexOnlyScan);
+ amcostestimate_function amcostestimate;
+ List *qpquals;
+ Cost startup_cost = 0;
+ Cost run_cost = 0;
+ Cost cpu_run_cost = 0;
+ Cost indexStartupCost;
+ Cost indexTotalCost;
+ Selectivity indexSelectivity;
+ double indexCorrelation,
+ csquared;
+ double spc_seq_page_cost,
+ spc_random_page_cost;
+ Cost min_IO_cost,
+ max_IO_cost;
+ QualCost qpqual_cost;
+ Cost cpu_per_tuple;
+ double tuples_fetched;
+ double pages_fetched;
+ double rand_heap_pages;
+ double index_pages;
+
+ /* Should only be applied to base relations */
+ Assert(IsA(baserel, RelOptInfo) &&
+ IsA(index, IndexOptInfo));
+ Assert(baserel->relid > 0);
+ Assert(baserel->rtekind == RTE_RELATION);
+
+ /*
+ * Mark the path with the correct row estimate, and identify which quals
+ * will need to be enforced as qpquals. We need not check any quals that
+ * are implied by the index's predicate, so we can use indrestrictinfo not
+ * baserestrictinfo as the list of relevant restriction clauses for the
+ * rel.
+ */
+ if (path->path.param_info)
+ {
+ path->path.rows = path->path.param_info->ppi_rows;
+ /* qpquals come from the rel's restriction clauses and ppi_clauses */
+ qpquals = list_concat(extract_nonindex_conditions(path->indexinfo->indrestrictinfo,
+ path->indexclauses),
+ extract_nonindex_conditions(path->path.param_info->ppi_clauses,
+ path->indexclauses));
+ }
+ else
+ {
+ path->path.rows = baserel->rows;
+ /* qpquals come from just the rel's restriction clauses */
+ qpquals = extract_nonindex_conditions(path->indexinfo->indrestrictinfo,
+ path->indexclauses);
+ }
+
+ if (!enable_indexscan)
+ startup_cost += disable_cost;
+ /* we don't need to check enable_indexonlyscan; indxpath.c does that */
+
+ /*
+ * Call index-access-method-specific code to estimate the processing cost
+ * for scanning the index, as well as the selectivity of the index (ie,
+ * the fraction of main-table tuples we will have to retrieve) and its
+ * correlation to the main-table tuple order. We need a cast here because
+ * pathnodes.h uses a weak function type to avoid including amapi.h.
+ */
+ amcostestimate = (amcostestimate_function) index->amcostestimate;
+ amcostestimate(root, path, loop_count,
+ &indexStartupCost, &indexTotalCost,
+ &indexSelectivity, &indexCorrelation,
+ &index_pages);
+
+ /*
+ * Save amcostestimate's results for possible use in bitmap scan planning.
+ * We don't bother to save indexStartupCost or indexCorrelation, because a
+ * bitmap scan doesn't care about either.
+ */
+ path->indextotalcost = indexTotalCost;
+ path->indexselectivity = indexSelectivity;
+
+ /* all costs for touching index itself included here */
+ startup_cost += indexStartupCost;
+ run_cost += indexTotalCost - indexStartupCost;
+
+ /* estimate number of main-table tuples fetched */
+ tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
+
+ /* fetch estimated page costs for tablespace containing table */
+ get_tablespace_page_costs(baserel->reltablespace,
+ &spc_random_page_cost,
+ &spc_seq_page_cost);
+
+ /*----------
+ * Estimate number of main-table pages fetched, and compute I/O cost.
+ *
+ * When the index ordering is uncorrelated with the table ordering,
+ * we use an approximation proposed by Mackert and Lohman (see
+ * index_pages_fetched() for details) to compute the number of pages
+ * fetched, and then charge spc_random_page_cost per page fetched.
+ *
+ * When the index ordering is exactly correlated with the table ordering
+ * (just after a CLUSTER, for example), the number of pages fetched should
+ * be exactly selectivity * table_size. What's more, all but the first
+ * will be sequential fetches, not the random fetches that occur in the
+ * uncorrelated case. So if the number of pages is more than 1, we
+ * ought to charge
+ * spc_random_page_cost + (pages_fetched - 1) * spc_seq_page_cost
+ * For partially-correlated indexes, we ought to charge somewhere between
+ * these two estimates. We currently interpolate linearly between the
+ * estimates based on the correlation squared (XXX is that appropriate?).
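+ *
+ * For example, with hypothetical values pages_fetched = 10,
+ * spc_random_page_cost = 4.0 and spc_seq_page_cost = 1.0, the uncorrelated
+ * estimate is 10 * 4.0 = 40 while the fully correlated estimate is
+ * 4.0 + 9 * 1.0 = 13; a partially-correlated index is charged somewhere
+ * in between, per the interpolation below.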
+ *
+ * If it's an index-only scan, then we will not need to fetch any heap
+ * pages for which the visibility map shows all tuples are visible.
+ * Hence, reduce the estimated number of heap fetches accordingly.
+ * We use the measured fraction of the entire heap that is all-visible,
+ * which might not be particularly relevant to the subset of the heap
+ * that this query will fetch; but it's not clear how to do better.
+ *----------
+ */
+ if (loop_count > 1)
+ {
+ /*
+ * For repeated indexscans, the appropriate estimate for the
+ * uncorrelated case is to scale up the number of tuples fetched in
+ * the Mackert and Lohman formula by the number of scans, so that we
+ * estimate the number of pages fetched by all the scans; then
+ * pro-rate the costs for one scan. In this case we assume all the
+ * fetches are random accesses.
+ */
+ pages_fetched = index_pages_fetched(tuples_fetched * loop_count,
+ baserel->pages,
+ (double) index->pages,
+ root);
+
+ if (indexonly)
+ pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
+
+ rand_heap_pages = pages_fetched;
+
+ max_IO_cost = (pages_fetched * spc_random_page_cost) / loop_count;
+
+ /*
+ * In the perfectly correlated case, the number of pages touched by
+ * each scan is selectivity * table_size, and we can use the Mackert
+ * and Lohman formula at the page level to estimate how much work is
+ * saved by caching across scans. We still assume all the fetches are
+ * random, though, which is an overestimate that's hard to correct for
+ * without double-counting the cache effects. (But in most cases
+ * where such a plan is actually interesting, only one page would get
+ * fetched per scan anyway, so it shouldn't matter much.)
+ */
+ pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
+
+ pages_fetched = index_pages_fetched(pages_fetched * loop_count,
+ baserel->pages,
+ (double) index->pages,
+ root);
+
+ if (indexonly)
+ pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
+
+ min_IO_cost = (pages_fetched * spc_random_page_cost) / loop_count;
+ }
+ else
+ {
+ /*
+ * Normal case: apply the Mackert and Lohman formula, and then
+ * interpolate between that and the correlation-derived result.
+ */
+ pages_fetched = index_pages_fetched(tuples_fetched,
+ baserel->pages,
+ (double) index->pages,
+ root);
+
+ if (indexonly)
+ pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
+
+ rand_heap_pages = pages_fetched;
+
+ /* max_IO_cost is for the perfectly uncorrelated case (csquared=0) */
+ max_IO_cost = pages_fetched * spc_random_page_cost;
+
+ /* min_IO_cost is for the perfectly correlated case (csquared=1) */
+ pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
+
+ if (indexonly)
+ pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
+
+ if (pages_fetched > 0)
+ {
+ min_IO_cost = spc_random_page_cost;
+ if (pages_fetched > 1)
+ min_IO_cost += (pages_fetched - 1) * spc_seq_page_cost;
+ }
+ else
+ min_IO_cost = 0;
+ }
+
+ if (partial_path)
+ {
+ /*
+ * For index only scans compute workers based on number of index pages
+ * fetched; the number of heap pages we fetch might be so small as to
+ * effectively rule out parallelism, which we don't want to do.
+ */
+ if (indexonly)
+ rand_heap_pages = -1;
+
+ /*
+ * Estimate the number of parallel workers required to scan index. Use
+ * the number of heap pages computed considering heap fetches won't be
+ * sequential as for parallel scans the pages are accessed in random
+ * order.
+ */
+ path->path.parallel_workers = compute_parallel_worker(baserel,
+ rand_heap_pages,
+ index_pages,
+ max_parallel_workers_per_gather);
+
+ /*
+ * Fall out if workers can't be assigned for parallel scan, because in
+ * such a case this path will be rejected. So there is no benefit in
+ * doing extra computation.
+ */
+ if (path->path.parallel_workers <= 0)
+ return;
+
+ path->path.parallel_aware = true;
+ }
+
+ /*
+ * Now interpolate based on estimated index order correlation to get total
+ * disk I/O cost for main table accesses.
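+ *
+ * For example, indexCorrelation = 0.5 gives csquared = 0.25, so we charge
+ * max_IO_cost + 0.25 * (min_IO_cost - max_IO_cost), i.e. a quarter of the
+ * way from the fully uncorrelated cost toward the fully correlated one.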
+ */
+ csquared = indexCorrelation * indexCorrelation;
+
+ run_cost += max_IO_cost + csquared * (min_IO_cost - max_IO_cost);
+
+ /*
+ * Estimate CPU costs per tuple.
+ *
+ * What we want here is cpu_tuple_cost plus the evaluation costs of any
+ * qual clauses that we have to evaluate as qpquals.
+ */
+ cost_qual_eval(&qpqual_cost, qpquals, root);
+
+ startup_cost += qpqual_cost.startup;
+ cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
+
+ cpu_run_cost += cpu_per_tuple * tuples_fetched;
+
+ /* tlist eval costs are paid per output row, not per tuple scanned */
+ startup_cost += path->path.pathtarget->cost.startup;
+ cpu_run_cost += path->path.pathtarget->cost.per_tuple * path->path.rows;
+
+ /* Adjust costing for parallelism, if used. */
+ if (path->path.parallel_workers > 0)
+ {
+ double parallel_divisor = get_parallel_divisor(&path->path);
+
+ path->path.rows = clamp_row_est(path->path.rows / parallel_divisor);
+
+ /* The CPU cost is divided among all the workers. */
+ cpu_run_cost /= parallel_divisor;
+ }
+
+ run_cost += cpu_run_cost;
+
+ path->path.startup_cost = startup_cost;
+ path->path.total_cost = startup_cost + run_cost;
+}
+
+/*
+ * extract_nonindex_conditions
+ *
+ * Given a list of quals to be enforced in an indexscan, extract the ones that
+ * will have to be applied as qpquals (ie, the index machinery won't handle
+ * them). Here we detect only whether a qual clause is directly redundant
+ * with some indexclause. If the index path is chosen for use, createplan.c
+ * will try a bit harder to get rid of redundant qual conditions; specifically
+ * it will see if quals can be proven to be implied by the indexquals. But
+ * it does not seem worth the cycles to try to factor that in at this stage,
+ * since we're only trying to estimate qual eval costs. Otherwise this must
+ * match the logic in create_indexscan_plan().
+ *
+ * qual_clauses, and the result, are lists of RestrictInfos.
+ * indexclauses is a list of IndexClauses.
+ */
+static List *
+extract_nonindex_conditions(List *qual_clauses, List *indexclauses)
+{
+ List *result = NIL;
+ ListCell *lc;
+
+ foreach(lc, qual_clauses)
+ {
+ RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc);
+
+ if (rinfo->pseudoconstant)
+ continue; /* we may drop pseudoconstants here */
+ if (is_redundant_with_indexclauses(rinfo, indexclauses))
+ continue; /* dup or derived from same EquivalenceClass */
+ /* ... skip the predicate proof attempt createplan.c will try ... */
+ result = lappend(result, rinfo);
+ }
+ return result;
+}
+
+/*
+ * index_pages_fetched
+ * Estimate the number of pages actually fetched after accounting for
+ * cache effects.
+ *
+ * We use an approximation proposed by Mackert and Lohman, "Index Scans
+ * Using a Finite LRU Buffer: A Validated I/O Model", ACM Transactions
+ * on Database Systems, Vol. 14, No. 3, September 1989, Pages 401-424.
+ * The Mackert and Lohman approximation is that the number of pages
+ * fetched is
+ * PF =
+ * min(2TNs/(2T+Ns), T) when T <= b
+ * 2TNs/(2T+Ns) when T > b and Ns <= 2Tb/(2T-b)
+ * b + (Ns - 2Tb/(2T-b))*(T-b)/T when T > b and Ns > 2Tb/(2T-b)
+ * where
+ * T = # pages in table
+ * N = # tuples in table
+ * s = selectivity = fraction of table to be scanned
+ * b = # buffer pages available (we include kernel space here)
+ *
+ * We assume that effective_cache_size is the total number of buffer pages
+ * available for the whole query, and pro-rate that space across all the
+ * tables in the query and the index currently under consideration. (This
+ * ignores space needed for other indexes used by the query, but since we
+ * don't know which indexes will get used, we can't estimate that very well;
+ * and in any case counting all the tables may well be an overestimate, since
+ * depending on the join plan not all the tables may be scanned concurrently.)
+ *
+ * The product Ns is the number of tuples fetched; we pass in that
+ * product rather than calculating it here. "pages" is the number of pages
+ * in the object under consideration (either an index or a table).
+ * "index_pages" is the amount to add to the total table space, which was
+ * computed for us by make_one_rel.
+ *
+ * Caller is expected to have ensured that tuples_fetched is greater than zero
+ * and rounded to integer (see clamp_row_est). The result will likewise be
+ * greater than zero and integral.
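+ *
+ * As a worked example with hypothetical numbers: for a table of T = 100
+ * pages with b >= T (the table fits in its pro-rated cache share),
+ * fetching Ns = 50 tuples gives PF = 2*100*50 / (2*100 + 50) = 40 pages,
+ * reflecting that some fetches hit pages already in cache.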
+ */
+double
+index_pages_fetched(double tuples_fetched, BlockNumber pages,
+ double index_pages, PlannerInfo *root)
+{
+ double pages_fetched;
+ double total_pages;
+ double T,
+ b;
+
+ /* T is # pages in table, but don't allow it to be zero */
+ T = (pages > 1) ? (double) pages : 1.0;
+
+ /* Compute number of pages assumed to be competing for cache space */
+ total_pages = root->total_table_pages + index_pages;
+ total_pages = Max(total_pages, 1.0);
+ Assert(T <= total_pages);
+
+ /* b is pro-rated share of effective_cache_size */
+ b = (double) effective_cache_size * T / total_pages;
+
+ /* force it positive and integral */
+ if (b <= 1.0)
+ b = 1.0;
+ else
+ b = ceil(b);
+
+ /* This part is the Mackert and Lohman formula */
+ if (T <= b)
+ {
+ pages_fetched =
+ (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
+ if (pages_fetched >= T)
+ pages_fetched = T;
+ else
+ pages_fetched = ceil(pages_fetched);
+ }
+ else
+ {
+ double lim;
+
+ lim = (2.0 * T * b) / (2.0 * T - b);
+ if (tuples_fetched <= lim)
+ {
+ pages_fetched =
+ (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
+ }
+ else
+ {
+ pages_fetched =
+ b + (tuples_fetched - lim) * (T - b) / T;
+ }
+ pages_fetched = ceil(pages_fetched);
+ }
+ return pages_fetched;
+}
+
+/*
+ * get_indexpath_pages
+ * Determine the total size of the indexes used in a bitmap index path.
+ *
+ * Note: if the same index is used more than once in a bitmap tree, we will
+ * count it multiple times, which perhaps is the wrong thing ... but it's
+ * not completely clear, and detecting duplicates is difficult, so ignore it
+ * for now.
+ */
+static double
+get_indexpath_pages(Path *bitmapqual)
+{
+ double result = 0;
+ ListCell *l;
+
+ if (IsA(bitmapqual, BitmapAndPath))
+ {
+ BitmapAndPath *apath = (BitmapAndPath *) bitmapqual;
+
+ foreach(l, apath->bitmapquals)
+ {
+ result += get_indexpath_pages((Path *) lfirst(l));
+ }
+ }
+ else if (IsA(bitmapqual, BitmapOrPath))
+ {
+ BitmapOrPath *opath = (BitmapOrPath *) bitmapqual;
+
+ foreach(l, opath->bitmapquals)
+ {
+ result += get_indexpath_pages((Path *) lfirst(l));
+ }
+ }
+ else if (IsA(bitmapqual, IndexPath))
+ {
+ IndexPath *ipath = (IndexPath *) bitmapqual;
+
+ result = (double) ipath->indexinfo->pages;
+ }
+ else
+ elog(ERROR, "unrecognized node type: %d", nodeTag(bitmapqual));
+
+ return result;
+}
+
+/*
+ * cost_bitmap_heap_scan
+ * Determines and returns the cost of scanning a relation using a bitmap
+ * index-then-heap plan.
+ *
+ * 'baserel' is the relation to be scanned
+ * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
+ * 'bitmapqual' is a tree of IndexPaths, BitmapAndPaths, and BitmapOrPaths
+ * 'loop_count' is the number of repetitions of the indexscan to factor into
+ * estimates of caching behavior
+ *
+ * Note: the component IndexPaths in bitmapqual should have been costed
+ * using the same loop_count.
+ */
+void
+cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
+ ParamPathInfo *param_info,
+ Path *bitmapqual, double loop_count)
+{
+ Cost startup_cost = 0;
+ Cost run_cost = 0;
+ Cost indexTotalCost;
+ QualCost qpqual_cost;
+ Cost cpu_per_tuple;
+ Cost cost_per_page;
+ Cost cpu_run_cost;
+ double tuples_fetched;
+ double pages_fetched;
+ double spc_seq_page_cost,
+ spc_random_page_cost;
+ double T;
+
+ /* Should only be applied to base relations */
+ Assert(IsA(baserel, RelOptInfo));
+ Assert(baserel->relid > 0);
+ Assert(baserel->rtekind == RTE_RELATION);
+
+ /* Mark the path with the correct row estimate */
+ if (param_info)
+ path->rows = param_info->ppi_rows;
+ else
+ path->rows = baserel->rows;
+
+ if (!enable_bitmapscan)
+ startup_cost += disable_cost;
+
+ pages_fetched = compute_bitmap_pages(root, baserel, bitmapqual,
+ loop_count, &indexTotalCost,
+ &tuples_fetched);
+
+ startup_cost += indexTotalCost;
+ T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;
+
+ /* Fetch estimated page costs for tablespace containing table. */
+ get_tablespace_page_costs(baserel->reltablespace,
+ &spc_random_page_cost,
+ &spc_seq_page_cost);
+
+ /*
+ * For small numbers of pages we should charge spc_random_page_cost
+ * apiece, while if nearly all the table's pages are being read, it's more
+ * appropriate to charge spc_seq_page_cost apiece. The effect is
+ * nonlinear, too. For lack of a better idea, interpolate like this to
+ * determine the cost per page.
+ */
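+ /*
+ * For example, with the hypothetical (default) page costs
+ * spc_random_page_cost = 4.0 and spc_seq_page_cost = 1.0, fetching 25% of
+ * the table's pages gives cost_per_page = 4.0 - 3.0 * sqrt(0.25) = 2.5,
+ * while fetching nearly all pages drives it toward 1.0.
+ */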
+ if (pages_fetched >= 2.0)
+ cost_per_page = spc_random_page_cost -
+ (spc_random_page_cost - spc_seq_page_cost)
+ * sqrt(pages_fetched / T);
+ else
+ cost_per_page = spc_random_page_cost;
+
+ run_cost += pages_fetched * cost_per_page;
+
+ /*
+ * Estimate CPU costs per tuple.
+ *
+ * Often the indexquals don't need to be rechecked at each tuple ... but
+ * not always, especially not if there are enough tuples involved that the
+ * bitmaps become lossy. For the moment, just assume they will be
+ * rechecked always. This means we charge the full freight for all the
+ * scan clauses.
+ */
+ get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
+
+ startup_cost += qpqual_cost.startup;
+ cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
+ cpu_run_cost = cpu_per_tuple * tuples_fetched;
+
+ /* Adjust costing for parallelism, if used. */
+ if (path->parallel_workers > 0)
+ {
+ double parallel_divisor = get_parallel_divisor(path);
+
+ /* The CPU cost is divided among all the workers. */
+ cpu_run_cost /= parallel_divisor;
+
+ path->rows = clamp_row_est(path->rows / parallel_divisor);
+ }
+
+ run_cost += cpu_run_cost;
+
+ /* tlist eval costs are paid per output row, not per tuple scanned */
+ startup_cost += path->pathtarget->cost.startup;
+ run_cost += path->pathtarget->cost.per_tuple * path->rows;
+
+ path->startup_cost = startup_cost;
+ path->total_cost = startup_cost + run_cost;
+}
+
+/*
+ * cost_bitmap_tree_node
+ * Extract cost and selectivity from a bitmap tree node (index/and/or)
+ */
+void
+cost_bitmap_tree_node(Path *path, Cost *cost, Selectivity *selec)
+{
+ if (IsA(path, IndexPath))
+ {
+ *cost = ((IndexPath *) path)->indextotalcost;
+ *selec = ((IndexPath *) path)->indexselectivity;
+
+ /*
+ * Charge a small amount per retrieved tuple to reflect the costs of
+ * manipulating the bitmap. This is mostly to make sure that a bitmap
+ * scan doesn't look to be the same cost as an indexscan to retrieve a
+ * single tuple.
+ */
+ *cost += 0.1 * cpu_operator_cost * path->rows;
+ }
+ else if (IsA(path, BitmapAndPath))
+ {
+ *cost = path->total_cost;
+ *selec = ((BitmapAndPath *) path)->bitmapselectivity;
+ }
+ else if (IsA(path, BitmapOrPath))
+ {
+ *cost = path->total_cost;
+ *selec = ((BitmapOrPath *) path)->bitmapselectivity;
+ }
+ else
+ {
+ elog(ERROR, "unrecognized node type: %d", nodeTag(path));
+ *cost = *selec = 0; /* keep compiler quiet */
+ }
+}
+
+/*
+ * cost_bitmap_and_node
+ * Estimate the cost of a BitmapAnd node
+ *
+ * Note that this considers only the costs of index scanning and bitmap
+ * creation, not the eventual heap access. In that sense the object isn't
+ * truly a Path, but it has enough path-like properties (costs in particular)
+ * to warrant treating it as one. We don't bother to set the path rows field,
+ * however.
+ */
+void
+cost_bitmap_and_node(BitmapAndPath *path, PlannerInfo *root)
+{
+ Cost totalCost;
+ Selectivity selec;
+ ListCell *l;
+
+ /*
+ * We estimate AND selectivity on the assumption that the inputs are
+ * independent. This is probably often wrong, but we don't have the info
+ * to do better.
+ *
+ * The runtime cost of the BitmapAnd itself is estimated at 100x
+ * cpu_operator_cost for each tbm_intersect needed. Probably too small,
+ * definitely too simplistic?
+ */
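+ /*
+ * Hypothetical illustration: two inputs with selectivities 0.1 and 0.05
+ * and costs c1 and c2 yield bitmapselectivity = 0.005 and a node cost of
+ * c1 + c2 + 100 * cpu_operator_cost (one tbm_intersect for the second
+ * input).
+ */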
+ totalCost = 0.0;
+ selec = 1.0;
+ foreach(l, path->bitmapquals)
+ {
+ Path *subpath = (Path *) lfirst(l);
+ Cost subCost;
+ Selectivity subselec;
+
+ cost_bitmap_tree_node(subpath, &subCost, &subselec);
+
+ selec *= subselec;
+
+ totalCost += subCost;
+ if (l != list_head(path->bitmapquals))
+ totalCost += 100.0 * cpu_operator_cost;
+ }
+ path->bitmapselectivity = selec;
+ path->path.rows = 0; /* per above, not used */
+ path->path.startup_cost = totalCost;
+ path->path.total_cost = totalCost;
+}
+
+/*
+ * cost_bitmap_or_node
+ * Estimate the cost of a BitmapOr node
+ *
+ * See comments for cost_bitmap_and_node.
+ */
+void
+cost_bitmap_or_node(BitmapOrPath *path, PlannerInfo *root)
+{
+ Cost totalCost;
+ Selectivity selec;
+ ListCell *l;
+
+ /*
+ * We estimate OR selectivity on the assumption that the inputs are
+ * non-overlapping, since that's often the case in "x IN (list)" type
+ * situations. Of course, we clamp to 1.0 at the end.
+ *
+ * The runtime cost of the BitmapOr itself is estimated at 100x
+ * cpu_operator_cost for each tbm_union needed. Probably too small,
+ * definitely too simplistic? We are aware that the tbm_unions are
+ * optimized out when the inputs are BitmapIndexScans.
+ */
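+ /*
+ * Hypothetical illustration: three IndexPath inputs with selectivities
+ * 0.2, 0.3 and 0.6 give bitmapselectivity = Min(1.1, 1.0) = 1.0, and no
+ * tbm_union charge is added because the inputs are plain index scans.
+ */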
+ totalCost = 0.0;
+ selec = 0.0;
+ foreach(l, path->bitmapquals)
+ {
+ Path *subpath = (Path *) lfirst(l);
+ Cost subCost;
+ Selectivity subselec;
+
+ cost_bitmap_tree_node(subpath, &subCost, &subselec);
+
+ selec += subselec;
+
+ totalCost += subCost;
+ if (l != list_head(path->bitmapquals) &&
+ !IsA(subpath, IndexPath))
+ totalCost += 100.0 * cpu_operator_cost;
+ }
+ path->bitmapselectivity = Min(selec, 1.0);
+ path->path.rows = 0; /* per above, not used */
+ path->path.startup_cost = totalCost;
+ path->path.total_cost = totalCost;
+}
+
+/*
+ * cost_tidscan
+ * Determines and returns the cost of scanning a relation using TIDs.
+ *
+ * 'baserel' is the relation to be scanned
+ * 'tidquals' is the list of TID-checkable quals
+ * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
+ */
+void
+cost_tidscan(Path *path, PlannerInfo *root,
+ RelOptInfo *baserel, List *tidquals, ParamPathInfo *param_info)
+{
+ Cost startup_cost = 0;
+ Cost run_cost = 0;
+ bool isCurrentOf = false;
+ QualCost qpqual_cost;
+ Cost cpu_per_tuple;
+ QualCost tid_qual_cost;
+ int ntuples;
+ ListCell *l;
+ double spc_random_page_cost;
+
+ /* Should only be applied to base relations */
+ Assert(baserel->relid > 0);
+ Assert(baserel->rtekind == RTE_RELATION);
+
+ /* Mark the path with the correct row estimate */
+ if (param_info)
+ path->rows = param_info->ppi_rows;
+ else
+ path->rows = baserel->rows;
+
+ /* Count how many tuples we expect to retrieve */
+ ntuples = 0;
+ foreach(l, tidquals)
+ {
+ RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
+ Expr *qual = rinfo->clause;
+
+ if (IsA(qual, ScalarArrayOpExpr))
+ {
+ /* Each element of the array yields 1 tuple */
+ ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) qual;
+ Node *arraynode = (Node *) lsecond(saop->args);
+
+ ntuples += estimate_array_length(arraynode);
+ }
+ else if (IsA(qual, CurrentOfExpr))
+ {
+ /* CURRENT OF yields 1 tuple */
+ isCurrentOf = true;
+ ntuples++;
+ }
+ else
+ {
+ /* It's just CTID = something, count 1 tuple */
+ ntuples++;
+ }
+ }
+
+ /*
+ * We must force TID scan for WHERE CURRENT OF, because only nodeTidscan.c
+ * understands how to do it correctly. Therefore, honor enable_tidscan
+ * only when CURRENT OF isn't present. Also note that cost_qual_eval
+ * counts a CurrentOfExpr as having startup cost disable_cost, which we
+ * subtract off here; that's to prevent other plan types such as seqscan
+ * from winning.
+ */
+ if (isCurrentOf)
+ {
+ Assert(baserel->baserestrictcost.startup >= disable_cost);
+ startup_cost -= disable_cost;
+ }
+ else if (!enable_tidscan)
+ startup_cost += disable_cost;
+
+ /*
+ * The TID qual expressions will be computed once, any other baserestrict
+ * quals once per retrieved tuple.
+ */
+ cost_qual_eval(&tid_qual_cost, tidquals, root);
+
+ /* fetch estimated page cost for tablespace containing table */
+ get_tablespace_page_costs(baserel->reltablespace,
+ &spc_random_page_cost,
+ NULL);
+
+ /* disk costs --- assume each tuple on a different page */
+ run_cost += spc_random_page_cost * ntuples;
+
+ /* Add scanning CPU costs */
+ get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
+
+ /* XXX currently we assume TID quals are a subset of qpquals */
+ startup_cost += qpqual_cost.startup + tid_qual_cost.per_tuple;
+ cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple -
+ tid_qual_cost.per_tuple;
+ run_cost += cpu_per_tuple * ntuples;
+
+ /* tlist eval costs are paid per output row, not per tuple scanned */
+ startup_cost += path->pathtarget->cost.startup;
+ run_cost += path->pathtarget->cost.per_tuple * path->rows;
+
+ path->startup_cost = startup_cost;
+ path->total_cost = startup_cost + run_cost;
+}
+
+/*
+ * cost_tidrangescan
+ * Determines and sets the costs of scanning a relation using a range of
+ * TIDs for 'path'
+ *
+ * 'baserel' is the relation to be scanned
+ * 'tidrangequals' is the list of TID-checkable range quals
+ * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
+ */
+void
+cost_tidrangescan(Path *path, PlannerInfo *root,
+ RelOptInfo *baserel, List *tidrangequals,
+ ParamPathInfo *param_info)
+{
+ Selectivity selectivity;
+ double pages;
+ Cost startup_cost = 0;
+ Cost run_cost = 0;
+ QualCost qpqual_cost;
+ Cost cpu_per_tuple;
+ QualCost tid_qual_cost;
+ double ntuples;
+ double nseqpages;
+ double spc_random_page_cost;
+ double spc_seq_page_cost;
+
+ /* Should only be applied to base relations */
+ Assert(baserel->relid > 0);
+ Assert(baserel->rtekind == RTE_RELATION);
+
+ /* Mark the path with the correct row estimate */
+ if (param_info)
+ path->rows = param_info->ppi_rows;
+ else
+ path->rows = baserel->rows;
+
+ /* Count how many tuples and pages we expect to scan */
+ selectivity = clauselist_selectivity(root, tidrangequals, baserel->relid,
+ JOIN_INNER, NULL);
+ pages = ceil(selectivity * baserel->pages);
+
+ if (pages <= 0.0)
+ pages = 1.0;
+
+ /*
+ * The first page in a range requires a random seek, but each subsequent
+ * page is just a normal sequential page read. NOTE: it's desirable for
+ * TID Range Scans to cost more than the equivalent Sequential Scans,
+ * because Seq Scans have some performance advantages such as scan
+ * synchronization and parallelizability, and we'd prefer one of them to
+ * be picked unless a TID Range Scan really is better.
+ */
+ ntuples = selectivity * baserel->tuples;
+ nseqpages = pages - 1.0;
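+ /*
+ * For instance (hypothetical numbers): a range qual with selectivity 0.1
+ * on a 1000-page table gives pages = 100, so the disk charge below is one
+ * random page fetch plus 99 sequential page fetches.
+ */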
+
+ if (!enable_tidscan)
+ startup_cost += disable_cost;
+
+ /*
+ * The TID qual expressions will be computed once, any other baserestrict
+ * quals once per retrieved tuple.
+ */
+ cost_qual_eval(&tid_qual_cost, tidrangequals, root);
+
+ /* fetch estimated page cost for tablespace containing table */
+ get_tablespace_page_costs(baserel->reltablespace,
+ &spc_random_page_cost,
+ &spc_seq_page_cost);
+
+ /* disk costs; 1 random page and the remainder as seq pages */
+ run_cost += spc_random_page_cost + spc_seq_page_cost * nseqpages;
+
+ /* Add scanning CPU costs */
+ get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
+
+ /*
+ * XXX currently we assume TID quals are a subset of qpquals at this
+ * point; they will be removed (if possible) when we create the plan, so
+ * we subtract their cost from the total qpqual cost. (If the TID quals
+ * can't be removed, this is a mistake and we're going to underestimate
+ * the CPU cost a bit.)
+ */
+ startup_cost += qpqual_cost.startup + tid_qual_cost.per_tuple;
+ cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple -
+ tid_qual_cost.per_tuple;
+ run_cost += cpu_per_tuple * ntuples;
+
+ /* tlist eval costs are paid per output row, not per tuple scanned */
+ startup_cost += path->pathtarget->cost.startup;
+ run_cost += path->pathtarget->cost.per_tuple * path->rows;
+
+ path->startup_cost = startup_cost;
+ path->total_cost = startup_cost + run_cost;
+}
+
+/*
+ * cost_subqueryscan
+ * Determines and returns the cost of scanning a subquery RTE.
+ *
+ * 'baserel' is the relation to be scanned
+ * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
+ */
+void
+cost_subqueryscan(SubqueryScanPath *path, PlannerInfo *root,
+ RelOptInfo *baserel, ParamPathInfo *param_info)
+{
+ Cost startup_cost;
+ Cost run_cost;
+ List *qpquals;
+ QualCost qpqual_cost;
+ Cost cpu_per_tuple;
+
+ /* Should only be applied to base relations that are subqueries */
+ Assert(baserel->relid > 0);
+ Assert(baserel->rtekind == RTE_SUBQUERY);
+
+ /*
+ * We compute the rowcount estimate as the subplan's estimate times the
+ * selectivity of relevant restriction clauses. In simple cases this will
+ * come out the same as baserel->rows; but when dealing with parallelized
+ * paths we must do it like this to get the right answer.
+ */
+ if (param_info)
+ qpquals = list_concat_copy(param_info->ppi_clauses,
+ baserel->baserestrictinfo);
+ else
+ qpquals = baserel->baserestrictinfo;
+
+ path->path.rows = clamp_row_est(path->subpath->rows *
+ clauselist_selectivity(root,
+ qpquals,
+ 0,
+ JOIN_INNER,
+ NULL));
+
+ /*
+ * Cost of path is cost of evaluating the subplan, plus cost of evaluating
+ * any restriction clauses and tlist that will be attached to the
+ * SubqueryScan node, plus cpu_tuple_cost to account for selection and
+ * projection overhead.
+ */
+ path->path.startup_cost = path->subpath->startup_cost;
+ path->path.total_cost = path->subpath->total_cost;
+
+ get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
+
+ startup_cost = qpqual_cost.startup;
+ cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
+ run_cost = cpu_per_tuple * path->subpath->rows;
+
+ /* tlist eval costs are paid per output row, not per tuple scanned */
+ startup_cost += path->path.pathtarget->cost.startup;
+ run_cost += path->path.pathtarget->cost.per_tuple * path->path.rows;
+
+ path->path.startup_cost += startup_cost;
+ path->path.total_cost += startup_cost + run_cost;
+}
+
+/*
+ * cost_functionscan
+ * Determines and returns the cost of scanning a function RTE.
+ *
+ * 'baserel' is the relation to be scanned
+ * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
+ */
+void
+cost_functionscan(Path *path, PlannerInfo *root,
+ RelOptInfo *baserel, ParamPathInfo *param_info)
+{
+ Cost startup_cost = 0;
+ Cost run_cost = 0;
+ QualCost qpqual_cost;
+ Cost cpu_per_tuple;
+ RangeTblEntry *rte;
+ QualCost exprcost;
+
+ /* Should only be applied to base relations that are functions */
+ Assert(baserel->relid > 0);
+ rte = planner_rt_fetch(baserel->relid, root);
+ Assert(rte->rtekind == RTE_FUNCTION);
+
+ /* Mark the path with the correct row estimate */
+ if (param_info)
+ path->rows = param_info->ppi_rows;
+ else
+ path->rows = baserel->rows;
+
+ /*
+ * Estimate costs of executing the function expression(s).
+ *
+ * Currently, nodeFunctionscan.c always executes the functions to
+ * completion before returning any rows, and caches the results in a
+ * tuplestore. So the function eval cost is all startup cost, and per-row
+ * costs are minimal.
+ *
+ * XXX in principle we ought to charge tuplestore spill costs if the
+ * number of rows is large. However, given how phony our rowcount
+ * estimates for functions tend to be, there's not a lot of point in that
+ * refinement right now.
+ */
+ cost_qual_eval_node(&exprcost, (Node *) rte->functions, root);
+
+ startup_cost += exprcost.startup + exprcost.per_tuple;
+
+ /* Add scanning CPU costs */
+ get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
+
+ startup_cost += qpqual_cost.startup;
+ cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
+ run_cost += cpu_per_tuple * baserel->tuples;
+
+ /* tlist eval costs are paid per output row, not per tuple scanned */
+ startup_cost += path->pathtarget->cost.startup;
+ run_cost += path->pathtarget->cost.per_tuple * path->rows;
+
+ path->startup_cost = startup_cost;
+ path->total_cost = startup_cost + run_cost;
+}
+
+/*
+ * cost_tablefuncscan
+ * Determines and returns the cost of scanning a table function.
+ *
+ * 'baserel' is the relation to be scanned
+ * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
+ */
+void
+cost_tablefuncscan(Path *path, PlannerInfo *root,
+ RelOptInfo *baserel, ParamPathInfo *param_info)
+{
+ Cost startup_cost = 0;
+ Cost run_cost = 0;
+ QualCost qpqual_cost;
+ Cost cpu_per_tuple;
+ RangeTblEntry *rte;
+ QualCost exprcost;
+
+ /* Should only be applied to base relations that are functions */
+ Assert(baserel->relid > 0);
+ rte = planner_rt_fetch(baserel->relid, root);
+ Assert(rte->rtekind == RTE_TABLEFUNC);
+
+ /* Mark the path with the correct row estimate */
+ if (param_info)
+ path->rows = param_info->ppi_rows;
+ else
+ path->rows = baserel->rows;
+
+ /*
+ * Estimate costs of executing the table func expression(s).
+ *
+ * XXX in principle we ought to charge tuplestore spill costs if the
+ * number of rows is large. However, given how phony our rowcount
+ * estimates for tablefuncs tend to be, there's not a lot of point in that
+ * refinement right now.
+ */
+ cost_qual_eval_node(&exprcost, (Node *) rte->tablefunc, root);
+
+ startup_cost += exprcost.startup + exprcost.per_tuple;
+
+ /* Add scanning CPU costs */
+ get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
+
+ startup_cost += qpqual_cost.startup;
+ cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
+ run_cost += cpu_per_tuple * baserel->tuples;
+
+ /* tlist eval costs are paid per output row, not per tuple scanned */
+ startup_cost += path->pathtarget->cost.startup;
+ run_cost += path->pathtarget->cost.per_tuple * path->rows;
+
+ path->startup_cost = startup_cost;
+ path->total_cost = startup_cost + run_cost;
+}
+
+/*
+ * cost_valuesscan
+ * Determines and returns the cost of scanning a VALUES RTE.
+ *
+ * 'baserel' is the relation to be scanned
+ * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
+ */
+void
+cost_valuesscan(Path *path, PlannerInfo *root,
+ RelOptInfo *baserel, ParamPathInfo *param_info)
+{
+ Cost startup_cost = 0;
+ Cost run_cost = 0;
+ QualCost qpqual_cost;
+ Cost cpu_per_tuple;
+
+ /* Should only be applied to base relations that are values lists */
+ Assert(baserel->relid > 0);
+ Assert(baserel->rtekind == RTE_VALUES);
+
+ /* Mark the path with the correct row estimate */
+ if (param_info)
+ path->rows = param_info->ppi_rows;
+ else
+ path->rows = baserel->rows;
+
+ /*
+ * For now, estimate list evaluation cost at one operator eval per list
+ * (probably pretty bogus, but is it worth being smarter?)
+ */
+ cpu_per_tuple = cpu_operator_cost;
+
+ /* Add scanning CPU costs */
+ get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
+
+ startup_cost += qpqual_cost.startup;
+ cpu_per_tuple += cpu_tuple_cost + qpqual_cost.per_tuple;
+ run_cost += cpu_per_tuple * baserel->tuples;
+
+ /* tlist eval costs are paid per output row, not per tuple scanned */
+ startup_cost += path->pathtarget->cost.startup;
+ run_cost += path->pathtarget->cost.per_tuple * path->rows;
+
+ path->startup_cost = startup_cost;
+ path->total_cost = startup_cost + run_cost;
+}
+
+/*
+ * cost_ctescan
+ * Determines and returns the cost of scanning a CTE RTE.
+ *
+ * Note: this is used for both self-reference and regular CTEs; the
+ * possible cost differences are below the threshold of what we could
+ * estimate accurately anyway. Note that the costs of evaluating the
+ * referenced CTE query are added into the final plan as initplan costs,
+ * and should NOT be counted here.
+ */
+void
+cost_ctescan(Path *path, PlannerInfo *root,
+ RelOptInfo *baserel, ParamPathInfo *param_info)
+{
+ Cost startup_cost = 0;
+ Cost run_cost = 0;
+ QualCost qpqual_cost;
+ Cost cpu_per_tuple;
+
+ /* Should only be applied to base relations that are CTEs */
+ Assert(baserel->relid > 0);
+ Assert(baserel->rtekind == RTE_CTE);
+
+ /* Mark the path with the correct row estimate */
+ if (param_info)
+ path->rows = param_info->ppi_rows;
+ else
+ path->rows = baserel->rows;
+
+ /* Charge one CPU tuple cost per row for tuplestore manipulation */
+ cpu_per_tuple = cpu_tuple_cost;
+
+ /* Add scanning CPU costs */
+ get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
+
+ startup_cost += qpqual_cost.startup;
+ cpu_per_tuple += cpu_tuple_cost + qpqual_cost.per_tuple;
+ run_cost += cpu_per_tuple * baserel->tuples;
+
+ /* tlist eval costs are paid per output row, not per tuple scanned */
+ startup_cost += path->pathtarget->cost.startup;
+ run_cost += path->pathtarget->cost.per_tuple * path->rows;
+
+ path->startup_cost = startup_cost;
+ path->total_cost = startup_cost + run_cost;
+}
+
+/*
+ * cost_namedtuplestorescan
+ * Determines and returns the cost of scanning a named tuplestore.
+ */
+void
+cost_namedtuplestorescan(Path *path, PlannerInfo *root,
+ RelOptInfo *baserel, ParamPathInfo *param_info)
+{
+ Cost startup_cost = 0;
+ Cost run_cost = 0;
+ QualCost qpqual_cost;
+ Cost cpu_per_tuple;
+
+ /* Should only be applied to base relations that are Tuplestores */
+ Assert(baserel->relid > 0);
+ Assert(baserel->rtekind == RTE_NAMEDTUPLESTORE);
+
+ /* Mark the path with the correct row estimate */
+ if (param_info)
+ path->rows = param_info->ppi_rows;
+ else
+ path->rows = baserel->rows;
+
+ /* Charge one CPU tuple cost per row for tuplestore manipulation */
+ cpu_per_tuple = cpu_tuple_cost;
+
+ /* Add scanning CPU costs */
+ get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
+
+ startup_cost += qpqual_cost.startup;
+ cpu_per_tuple += cpu_tuple_cost + qpqual_cost.per_tuple;
+ run_cost += cpu_per_tuple * baserel->tuples;
+
+ path->startup_cost = startup_cost;
+ path->total_cost = startup_cost + run_cost;
+}
+
+/*
+ * cost_resultscan
+ * Determines and returns the cost of scanning an RTE_RESULT relation.
+ */
+void
+cost_resultscan(Path *path, PlannerInfo *root,
+ RelOptInfo *baserel, ParamPathInfo *param_info)
+{
+ Cost startup_cost = 0;
+ Cost run_cost = 0;
+ QualCost qpqual_cost;
+ Cost cpu_per_tuple;
+
+ /* Should only be applied to RTE_RESULT base relations */
+ Assert(baserel->relid > 0);
+ Assert(baserel->rtekind == RTE_RESULT);
+
+ /* Mark the path with the correct row estimate */
+ if (param_info)
+ path->rows = param_info->ppi_rows;
+ else
+ path->rows = baserel->rows;
+
+ /* We charge qual cost plus cpu_tuple_cost */
+ get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
+
+ startup_cost += qpqual_cost.startup;
+ cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
+ run_cost += cpu_per_tuple * baserel->tuples;
+
+ path->startup_cost = startup_cost;
+ path->total_cost = startup_cost + run_cost;
+}
+
+/*
+ * cost_recursive_union
+ * Determines and returns the cost of performing a recursive union,
+ * and also the estimated output size.
+ *
+ * We are given Paths for the nonrecursive and recursive terms.
+ */
+void
+cost_recursive_union(Path *runion, Path *nrterm, Path *rterm)
+{
+ Cost startup_cost;
+ Cost total_cost;
+ double total_rows;
+
+ /* We probably have decent estimates for the non-recursive term */
+ startup_cost = nrterm->startup_cost;
+ total_cost = nrterm->total_cost;
+ total_rows = nrterm->rows;
+
+ /*
+ * We arbitrarily assume that about 10 recursive iterations will be
+ * needed, and that we've managed to get a good fix on the cost and output
+ * size of each one of them. These are mighty shaky assumptions but it's
+ * hard to see how to do better.
+ */
+ total_cost += 10 * rterm->total_cost;
+ total_rows += 10 * rterm->rows;
+
+ /*
+ * Also charge cpu_tuple_cost per row to account for the costs of
+ * manipulating the tuplestores. (We don't worry about possible
+ * spill-to-disk costs.)
+ */
+ total_cost += cpu_tuple_cost * total_rows;
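+ /*
+ * Hypothetical example: if the non-recursive term returns 100 rows at
+ * total cost 10 and the recursive term is costed at 20 per iteration for
+ * another 100 rows, we estimate 100 + 10*100 = 1100 rows and a total cost
+ * of 10 + 10*20 + cpu_tuple_cost * 1100.
+ */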
+
+ runion->startup_cost = startup_cost;
+ runion->total_cost = total_cost;
+ runion->rows = total_rows;
+ runion->pathtarget->width = Max(nrterm->pathtarget->width,
+ rterm->pathtarget->width);
+}
+
+/*
+ * cost_tuplesort
+ * Determines and returns the cost of sorting a relation using tuplesort,
+ * not including the cost of reading the input data.
+ *
+ * If the total volume of data to sort is less than sort_mem, we will do
+ * an in-memory sort, which requires no I/O and about t*log2(t) tuple
+ * comparisons for t tuples.
+ *
+ * If the total volume exceeds sort_mem, we switch to a tape-style merge
+ * algorithm. There will still be about t*log2(t) tuple comparisons in
+ * total, but we will also need to write and read each tuple once per
+ * merge pass. We expect about ceil(logM(r)) merge passes where r is the
+ * number of initial runs formed and M is the merge order used by tuplesort.c.
+ * Since the average initial run should be about sort_mem, we have
+ * disk traffic = 2 * relsize * ceil(logM(relsize / sort_mem))
+ * cpu = comparison_cost * t * log2(t)
+ *
+ * If the sort is bounded (i.e., only the first k result tuples are needed)
+ * and k tuples can fit into sort_mem, we use a heap method that keeps only
+ * k tuples in the heap; this will require about t*log2(k) tuple comparisons.
+ *
+ * The disk traffic is assumed to be 3/4ths sequential and 1/4th random
+ * accesses (XXX can't we refine that guess?)
+ *
+ * By default, we charge two operator evals per tuple comparison, which should
+ * be in the right ballpark in most cases. The caller can tweak this by
+ * specifying nonzero comparison_cost; typically that's used for any extra
+ * work that has to be done to prepare the inputs to the comparison operators.
+ *
+ * 'tuples' is the number of tuples in the relation
+ * 'width' is the average tuple width in bytes
+ * 'comparison_cost' is the extra cost per comparison, if any
+ * 'sort_mem' is the number of kilobytes of work memory allowed for the sort
+ * 'limit_tuples' is the bound on the number of output tuples; -1 if no bound
+ */
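+/*
+ * Rough worked example (hypothetical numbers): sorting t = 1,000,000 tuples
+ * whose total size is about 100MB with sort_mem = 4MB exceeds memory, so we
+ * cost a disk sort: roughly 25 initial runs and, assuming a merge order of,
+ * say, M = 6, ceil(log6(25)) = 2 merge passes, i.e. each of the ~12800 pages
+ * is written and read twice, plus comparison_cost * t * log2(t) of CPU work.
+ */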
+static void
+cost_tuplesort(Cost *startup_cost, Cost *run_cost,
+ double tuples, int width,
+ Cost comparison_cost, int sort_mem,
+ double limit_tuples)
+{
+ double input_bytes = relation_byte_size(tuples, width);
+ double output_bytes;
+ double output_tuples;
+ long sort_mem_bytes = sort_mem * 1024L;
+
+ /*
+ * We want to be sure the cost of a sort is never estimated as zero, even
+ * if passed-in tuple count is zero. Besides, mustn't do log(0)...
+ */
+ if (tuples < 2.0)
+ tuples = 2.0;
+
+ /* Include the default cost-per-comparison */
+ comparison_cost += 2.0 * cpu_operator_cost;
+
+ /* Do we have a useful LIMIT? */
+ if (limit_tuples > 0 && limit_tuples < tuples)
+ {
+ output_tuples = limit_tuples;
+ output_bytes = relation_byte_size(output_tuples, width);
+ }
+ else
+ {
+ output_tuples = tuples;
+ output_bytes = input_bytes;
+ }
+
+ if (output_bytes > sort_mem_bytes)
+ {
+ /*
+ * We'll have to use a disk-based sort of all the tuples
+ */
+ double npages = ceil(input_bytes / BLCKSZ);
+ double nruns = input_bytes / sort_mem_bytes;
+ double mergeorder = tuplesort_merge_order(sort_mem_bytes);
+ double log_runs;
+ double npageaccesses;
+
+ /*
+ * CPU costs
+ *
+ * Assume about N log2 N comparisons
+ */
+ *startup_cost = comparison_cost * tuples * LOG2(tuples);
+
+ /* Disk costs */
+
+ /* Compute logM(r) as log(r) / log(M) */
+ if (nruns > mergeorder)
+ log_runs = ceil(log(nruns) / log(mergeorder));
+ else
+ log_runs = 1.0;
+ npageaccesses = 2.0 * npages * log_runs;
+ /* Assume 3/4ths of accesses are sequential, 1/4th are not */
+ *startup_cost += npageaccesses *
+ (seq_page_cost * 0.75 + random_page_cost * 0.25);
+ }
+ else if (tuples > 2 * output_tuples || input_bytes > sort_mem_bytes)
+ {
+ /*
+ * We'll use a bounded heap-sort keeping just K tuples in memory, for
+ * a total number of tuple comparisons of N log2 K; but the constant
+ * factor is a bit higher than for quicksort. Tweak it so that the
+ * cost curve is continuous at the crossover point.
+ */
+ *startup_cost = comparison_cost * tuples * LOG2(2.0 * output_tuples);
+ }
+ else
+ {
+ /* We'll use plain quicksort on all the input tuples */
+ *startup_cost = comparison_cost * tuples * LOG2(tuples);
+ }
+
+ /*
+ * Also charge a small amount (arbitrarily set equal to operator cost) per
+ * extracted tuple. We don't charge cpu_tuple_cost because a Sort node
+ * doesn't do qual-checking or projection, so it has less overhead than
+ * most plan nodes. Note it's correct to use tuples not output_tuples
+ * here --- the upper LIMIT will pro-rate the run cost so we'd be double
+ * counting the LIMIT otherwise.
+ */
+ *run_cost = cpu_operator_cost * tuples;
+}
+
+/*
+ * cost_incremental_sort
+ * Determines and returns the cost of sorting a relation incrementally, when
+ * the input path is presorted by a prefix of the pathkeys.
+ *
+ * 'presorted_keys' is the number of leading pathkeys by which the input path
+ * is sorted.
+ *
+ * We estimate the number of groups into which the relation is divided by the
+ * leading pathkeys, and then calculate the cost of sorting a single group
+ * with tuplesort using cost_tuplesort().
+ */
+void
+cost_incremental_sort(Path *path,
+ PlannerInfo *root, List *pathkeys, int presorted_keys,
+ Cost input_startup_cost, Cost input_total_cost,
+ double input_tuples, int width, Cost comparison_cost, int sort_mem,
+ double limit_tuples)
+{
+ Cost startup_cost = 0,
+ run_cost = 0,
+ input_run_cost = input_total_cost - input_startup_cost;
+ double group_tuples,
+ input_groups;
+ Cost group_startup_cost,
+ group_run_cost,
+ group_input_run_cost;
+ List *presortedExprs = NIL;
+ ListCell *l;
+ int i = 0;
+ bool unknown_varno = false;
+
+ Assert(presorted_keys != 0);
+
+ /*
+ * We want to be sure the cost of a sort is never estimated as zero, even
+ * if passed-in tuple count is zero. Besides, mustn't do log(0)...
+ */
+ if (input_tuples < 2.0)
+ input_tuples = 2.0;
+
+ /* Default estimate of number of groups, capped to one group per row. */
+ input_groups = Min(input_tuples, DEFAULT_NUM_DISTINCT);
+
+ /*
+ * Extract presorted keys as list of expressions.
+ *
+ * We need to be careful about Vars containing "varno 0" which might have
+ * been introduced by generate_append_tlist, which would confuse
+ * estimate_num_groups (in fact it'd fail for such expressions). See
+ * recurse_set_operations which has to deal with the same issue.
+ *
+ * Unlike recurse_set_operations we can't access the original target list
+ * here, and even if we could, it's not very clear how useful that would be
+ * for a set operation combining multiple tables. So we simply detect if
+ * there are any expressions with "varno 0" and use the default
+ * DEFAULT_NUM_DISTINCT in that case.
+ *
+ * We might also use either 1.0 (a single group) or input_tuples (each row
+ * being a separate group), pretty much the worst and best case for
+ * incremental sort. But those are extreme cases and using something in
+ * between seems reasonable. Furthermore, generate_append_tlist is used
+ * for set operations, which are likely to produce mostly unique output
+ * anyway - from that standpoint the DEFAULT_NUM_DISTINCT is defensive
+ * while maintaining lower startup cost.
+ */
+ foreach(l, pathkeys)
+ {
+ PathKey *key = (PathKey *) lfirst(l);
+ EquivalenceMember *member = (EquivalenceMember *)
+ linitial(key->pk_eclass->ec_members);
+
+ /*
+ * Check if the expression contains Var with "varno 0" so that we
+ * don't call estimate_num_groups in that case.
+ */
+ if (bms_is_member(0, pull_varnos(root, (Node *) member->em_expr)))
+ {
+ unknown_varno = true;
+ break;
+ }
+
+ /* expression not containing any Vars with "varno 0" */
+ presortedExprs = lappend(presortedExprs, member->em_expr);
+
+ i++;
+ if (i >= presorted_keys)
+ break;
+ }
+
+ /* Estimate number of groups with equal presorted keys. */
+ if (!unknown_varno)
+ input_groups = estimate_num_groups(root, presortedExprs, input_tuples,
+ NULL, NULL);
+
+ group_tuples = input_tuples / input_groups;
+ group_input_run_cost = input_run_cost / input_groups;
+
+ /*
+ * Estimate the average cost of sorting one group in which the presorted
+ * keys are all equal. Incremental sort is sensitive to the distribution of
+ * tuples among groups, and we're relying on quite rough assumptions about
+ * that distribution. Thus, be pessimistic about incremental sort
+ * performance and inflate the expected group size by 50% (i.e., cost each
+ * group as if it held 1.5 * group_tuples rows).
+ */
+ cost_tuplesort(&group_startup_cost, &group_run_cost,
+ 1.5 * group_tuples, width, comparison_cost, sort_mem,
+ limit_tuples);
+
+ /*
+ * Startup cost of incremental sort is the startup cost of its first group
+ * plus the cost of its input.
+ */
+ startup_cost += group_startup_cost
+ + input_startup_cost + group_input_run_cost;
+
+ /*
+ * After we started producing tuples from the first group, the cost of
+ * producing all the tuples is given by the cost to finish processing this
+ * group, plus the total cost to process the remaining groups, plus the
+ * remaining cost of input.
+ */
+ run_cost += group_run_cost
+ + (group_run_cost + group_startup_cost) * (input_groups - 1)
+ + group_input_run_cost * (input_groups - 1);
+
+ /*
+ * Incremental sort adds some overhead by itself. Firstly, it has to
+ * detect the sort groups. This is roughly equal to one extra copy and
+ * comparison per tuple. Secondly, it has to reset the tuplesort context
+ * for every group.
+ */
+ run_cost += (cpu_tuple_cost + comparison_cost) * input_tuples;
+ run_cost += 2.0 * cpu_tuple_cost * input_groups;
+
+ path->rows = input_tuples;
+ path->startup_cost = startup_cost;
+ path->total_cost = startup_cost + run_cost;
+}
+
+/*
+ * cost_sort
+ * Determines and returns the cost of sorting a relation, including
+ * the cost of reading the input data.
+ *
+ * NOTE: some callers currently pass NIL for pathkeys because they
+ * can't conveniently supply the sort keys. Since this routine doesn't
+ * currently do anything with pathkeys anyway, that doesn't matter...
+ * but if it ever does, it should react gracefully to lack of key data.
+ * (Actually, the thing we'd most likely be interested in is just the number
+ * of sort keys, which all callers *could* supply.)
+ */
+void
+cost_sort(Path *path, PlannerInfo *root,
+ List *pathkeys, Cost input_cost, double tuples, int width,
+ Cost comparison_cost, int sort_mem,
+ double limit_tuples)
+{
+ Cost startup_cost;
+ Cost run_cost;
+
+ cost_tuplesort(&startup_cost, &run_cost,
+ tuples, width,
+ comparison_cost, sort_mem,
+ limit_tuples);
+
+ if (!enable_sort)
+ startup_cost += disable_cost;
+
+ startup_cost += input_cost;
+
+ path->rows = tuples;
+ path->startup_cost = startup_cost;
+ path->total_cost = startup_cost + run_cost;
+}
+
+/*
+ * append_nonpartial_cost
+ * Estimate the cost of the non-partial paths in a Parallel Append.
+ * The non-partial paths are assumed to be the first "numpaths" paths
+ * from the subpaths list, and to be in order of decreasing cost.
+ */
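+/*
+ * For example (hypothetical costs): with parallel_workers = 3 and five
+ * non-partial subpaths costing 10, 8, 6, 4 and 2, the workers initially
+ * take 10, 8 and 6; the remaining paths are stacked onto whichever worker
+ * is currently cheapest (6+4, then 8+2), so every worker ends at 10 and the
+ * function returns 10.
+ */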
+static Cost
+append_nonpartial_cost(List *subpaths, int numpaths, int parallel_workers)
+{
+ Cost *costarr;
+ int arrlen;
+ ListCell *l;
+ ListCell *cell;
+ int i;
+ int path_index;
+ int min_index;
+ int max_index;
+
+ if (numpaths == 0)
+ return 0;
+
+ /*
+ * Array length is number of workers or number of relevant paths,
+ * whichever is less.
+ */
+ arrlen = Min(parallel_workers, numpaths);
+ costarr = (Cost *) palloc(sizeof(Cost) * arrlen);
+
+ /* The first few paths will each be claimed by a different worker. */
+ path_index = 0;
+ foreach(cell, subpaths)
+ {
+ Path *subpath = (Path *) lfirst(cell);
+
+ if (path_index == arrlen)
+ break;
+ costarr[path_index++] = subpath->total_cost;
+ }
+
+ /*
+ * Since subpaths are sorted by decreasing cost, the last one will have
+ * the minimum cost.
+ */
+ min_index = arrlen - 1;
+
+ /*
+ * For each of the remaining subpaths, add its cost to the array element
+ * with minimum cost.
+ */
+ for_each_cell(l, subpaths, cell)
+ {
+ Path *subpath = (Path *) lfirst(l);
+ int i;
+
+ /* Consider only the non-partial paths */
+ if (path_index++ == numpaths)
+ break;
+
+ costarr[min_index] += subpath->total_cost;
+
+ /* Update the new min cost array index */
+ for (min_index = i = 0; i < arrlen; i++)
+ {
+ if (costarr[i] < costarr[min_index])
+ min_index = i;
+ }
+ }
+
+ /* Return the highest cost from the array */
+ for (max_index = i = 0; i < arrlen; i++)
+ {
+ if (costarr[i] > costarr[max_index])
+ max_index = i;
+ }
+
+ return costarr[max_index];
+}
+
+/*
+ * cost_append
+ * Determines and returns the cost of an Append node.
+ */
+void
+cost_append(AppendPath *apath)
+{
+ ListCell *l;
+
+ apath->path.startup_cost = 0;
+ apath->path.total_cost = 0;
+ apath->path.rows = 0;
+
+ if (apath->subpaths == NIL)
+ return;
+
+ if (!apath->path.parallel_aware)
+ {
+ List *pathkeys = apath->path.pathkeys;
+
+ if (pathkeys == NIL)
+ {
+ Path *subpath = (Path *) linitial(apath->subpaths);
+
+ /*
+ * For an unordered, non-parallel-aware Append we take the startup
+ * cost as the startup cost of the first subpath.
+ */
+ apath->path.startup_cost = subpath->startup_cost;
+
+ /* Compute rows and costs as sums of subplan rows and costs. */
+ foreach(l, apath->subpaths)
+ {
+ Path *subpath = (Path *) lfirst(l);
+
+ apath->path.rows += subpath->rows;
+ apath->path.total_cost += subpath->total_cost;
+ }
+ }
+ else
+ {
+ /*
+ * For an ordered, non-parallel-aware Append we take the startup
+ * cost as the sum of the subpath startup costs. This ensures
+ * that we don't underestimate the startup cost when a query's
+ * LIMIT is such that several of the children have to be run to
+ * satisfy it. This might be overkill --- another plausible hack
+ * would be to take the Append's startup cost as the maximum of
+ * the child startup costs. But we don't want to risk believing
+ * that an ORDER BY LIMIT query can be satisfied at small cost
+ * when the first child has small startup cost but later ones
+ * don't. (If we had the ability to deal with nonlinear cost
+ * interpolation for partial retrievals, we would not need to be
+ * so conservative about this.)
+ *
+ * This case is also different from the above in that we have to
+ * account for possibly injecting sorts into subpaths that aren't
+ * natively ordered.
+ */
+ foreach(l, apath->subpaths)
+ {
+ Path *subpath = (Path *) lfirst(l);
+ Path sort_path; /* dummy for result of cost_sort */
+
+ if (!pathkeys_contained_in(pathkeys, subpath->pathkeys))
+ {
+ /*
+ * We'll need to insert a Sort node, so include costs for
+ * that. We can use the parent's LIMIT if any, since we
+ * certainly won't pull more than that many tuples from
+ * any child.
+ */
+ cost_sort(&sort_path,
+ NULL, /* doesn't currently need root */
+ pathkeys,
+ subpath->total_cost,
+ subpath->rows,
+ subpath->pathtarget->width,
+ 0.0,
+ work_mem,
+ apath->limit_tuples);
+ subpath = &sort_path;
+ }
+
+ apath->path.rows += subpath->rows;
+ apath->path.startup_cost += subpath->startup_cost;
+ apath->path.total_cost += subpath->total_cost;
+ }
+ }
+ }
+ else /* parallel-aware */
+ {
+ int i = 0;
+ double parallel_divisor = get_parallel_divisor(&apath->path);
+
+ /* Parallel-aware Append never produces ordered output. */
+ Assert(apath->path.pathkeys == NIL);
+
+ /* Calculate startup cost. */
+ foreach(l, apath->subpaths)
+ {
+ Path *subpath = (Path *) lfirst(l);
+
+ /*
+ * Append will start returning tuples when the child node having
+ * lowest startup cost is done setting up. We consider only the
+ * first few subplans that immediately get a worker assigned.
+ */
+ if (i == 0)
+ apath->path.startup_cost = subpath->startup_cost;
+ else if (i < apath->path.parallel_workers)
+ apath->path.startup_cost = Min(apath->path.startup_cost,
+ subpath->startup_cost);
+
+ /*
+ * Apply parallel divisor to subpaths. Scale the number of rows
+ * for each partial subpath based on the ratio of the parallel
+ * divisor originally used for the subpath to the one we adopted.
+ * Also add the cost of partial paths to the total cost, but
+ * ignore non-partial paths for now.
+ */
+ if (i < apath->first_partial_path)
+ apath->path.rows += subpath->rows / parallel_divisor;
+ else
+ {
+ double subpath_parallel_divisor;
+
+ subpath_parallel_divisor = get_parallel_divisor(subpath);
+ apath->path.rows += subpath->rows * (subpath_parallel_divisor /
+ parallel_divisor);
+ apath->path.total_cost += subpath->total_cost;
+ }
+
+ apath->path.rows = clamp_row_est(apath->path.rows);
+
+ i++;
+ }
+
+ /* Add cost for non-partial subpaths. */
+ apath->path.total_cost +=
+ append_nonpartial_cost(apath->subpaths,
+ apath->first_partial_path,
+ apath->path.parallel_workers);
+ }
+
+ /*
+ * Although Append does not do any selection or projection, it's not free;
+ * add a small per-tuple overhead.
+ */
+ apath->path.total_cost +=
+ cpu_tuple_cost * APPEND_CPU_COST_MULTIPLIER * apath->path.rows;
+}
+
+/*
+ * cost_merge_append
+ * Determines and returns the cost of a MergeAppend node.
+ *
+ * MergeAppend merges several pre-sorted input streams, using a heap that
+ * at any given instant holds the next tuple from each stream. If there
+ * are N streams, we need about N*log2(N) tuple comparisons to construct
+ * the heap at startup, and then for each output tuple, about log2(N)
+ * comparisons to replace the top entry.
+ *
+ * (The effective value of N will drop once some of the input streams are
+ * exhausted, but it seems unlikely to be worth trying to account for that.)
+ *
+ * The heap is never spilled to disk, since we assume N is not very large.
+ * So this is much simpler than cost_sort.
+ *
+ * As in cost_sort, we charge two operator evals per tuple comparison.
+ *
+ * 'pathkeys' is a list of sort keys
+ * 'n_streams' is the number of input streams
+ * 'input_startup_cost' is the sum of the input streams' startup costs
+ * 'input_total_cost' is the sum of the input streams' total costs
+ * 'tuples' is the number of tuples in all the streams
+ */
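+/*
+ * For instance (hypothetical numbers): merging N = 8 presorted streams with
+ * the default comparison cost of 2 * cpu_operator_cost charges
+ * 2 * cpu_operator_cost * 8 * 3 to build the heap, and about 3 comparisons
+ * (6 * cpu_operator_cost) per output tuple to replace the heap top.
+ */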
+void
+cost_merge_append(Path *path, PlannerInfo *root,
+ List *pathkeys, int n_streams,
+ Cost input_startup_cost, Cost input_total_cost,
+ double tuples)
+{
+ Cost startup_cost = 0;
+ Cost run_cost = 0;
+ Cost comparison_cost;
+ double N;
+ double logN;
+
+ /*
+ * Avoid log(0)...
+ */
+ N = (n_streams < 2) ? 2.0 : (double) n_streams;
+ logN = LOG2(N);
+
+ /* Assumed cost per tuple comparison */
+ comparison_cost = 2.0 * cpu_operator_cost;
+
+ /* Heap creation cost */
+ startup_cost += comparison_cost * N * logN;
+
+ /* Per-tuple heap maintenance cost */
+ run_cost += tuples * comparison_cost * logN;
+
+ /*
+ * Although MergeAppend does not do any selection or projection, it's not
+ * free; add a small per-tuple overhead.
+ */
+ run_cost += cpu_tuple_cost * APPEND_CPU_COST_MULTIPLIER * tuples;
+
+ path->startup_cost = startup_cost + input_startup_cost;
+ path->total_cost = startup_cost + run_cost + input_total_cost;
+}
+
+/*
+ * cost_material
+ * Determines and returns the cost of materializing a relation, including
+ * the cost of reading the input data.
+ *
+ * If the total volume of data to materialize exceeds work_mem, we will need
+ * to write it to disk, so the cost is much higher in that case.
+ *
+ * Note that here we are estimating the costs for the first scan of the
+ * relation, so the materialization is all overhead --- any savings will
+ * occur only on rescan, which is estimated in cost_rescan.
+ */
+void
+cost_material(Path *path,
+ Cost input_startup_cost, Cost input_total_cost,
+ double tuples, int width)
+{
+ Cost startup_cost = input_startup_cost;
+ Cost run_cost = input_total_cost - input_startup_cost;
+ double nbytes = relation_byte_size(tuples, width);
+ long work_mem_bytes = work_mem * 1024L;
+
+ path->rows = tuples;
+
+ /*
+ * Whether spilling or not, charge 2x cpu_operator_cost per tuple to
+ * reflect bookkeeping overhead. (This rate must be more than what
+ * cost_rescan charges for materialize, ie, cpu_operator_cost per tuple;
+ * if it is exactly the same then there will be a cost tie between
+ * nestloop with A outer, materialized B inner and nestloop with B outer,
+ * materialized A inner. The extra cost ensures we'll prefer
+ * materializing the smaller rel.) Note that this is normally a good deal
+ * less than cpu_tuple_cost; which is OK because a Material plan node
+ * doesn't do qual-checking or projection, so it's got less overhead than
+ * most plan nodes.
+ */
+ run_cost += 2 * cpu_operator_cost * tuples;
+
+ /*
+ * If we will spill to disk, charge at the rate of seq_page_cost per page.
+ * This cost is assumed to be evenly spread through the plan run phase,
+ * which isn't exactly accurate but our cost model doesn't allow for
+ * nonuniform costs within the run phase.
+ */
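+ /*
+ * E.g. (hypothetical): materializing about 100MB with work_mem = 4MB
+ * spills, adding seq_page_cost for each of the ~12800 BLCKSZ-sized pages.
+ */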
+ if (nbytes > work_mem_bytes)
+ {
+ double npages = ceil(nbytes / BLCKSZ);
+
+ run_cost += seq_page_cost * npages;
+ }
+
+ path->startup_cost = startup_cost;
+ path->total_cost = startup_cost + run_cost;
+}
+
+/*
+ * cost_memoize_rescan
+ * Determines the estimated cost of rescanning a Memoize node.
+ *
+ * In order to estimate this, we must gain knowledge of how often we expect to
+ * be called and how many distinct sets of parameters we are likely to be
+ * called with. If we expect a good cache hit ratio, then we can set our
+ * costs to account for that hit ratio, plus a little bit of cost for the
+ * caching itself. Caching will not work out well if we expect to be called
+ * with too many distinct parameter values. The worst-case here is that we
+ * never see any parameter value twice, in which case we'd never get a cache
+ * hit and caching would be a complete waste of effort.
+ */
+static void
+cost_memoize_rescan(PlannerInfo *root, MemoizePath *mpath,
+ Cost *rescan_startup_cost, Cost *rescan_total_cost)
+{
+ EstimationInfo estinfo;
+ Cost input_startup_cost = mpath->subpath->startup_cost;
+ Cost input_total_cost = mpath->subpath->total_cost;
+ double tuples = mpath->subpath->rows;
+ double calls = mpath->calls;
+ int width = mpath->subpath->pathtarget->width;
+
+ double hash_mem_bytes;
+ double est_entry_bytes;
+ double est_cache_entries;
+ double ndistinct;
+ double evict_ratio;
+ double hit_ratio;
+ Cost startup_cost;
+ Cost total_cost;
+
+ /* available cache space */
+ hash_mem_bytes = get_hash_memory_limit();
+
+ /*
+ * Set the number of bytes each cache entry should consume in the cache.
+ * To provide us with better estimations on how many cache entries we can
+ * store at once, we make a call to the executor here to ask it what
+ * memory overheads there are for a single cache entry.
+ *
+ * XXX we also store the cache key, but that's not accounted for here.
+ */
+ est_entry_bytes = relation_byte_size(tuples, width) +
+ ExecEstimateCacheEntryOverheadBytes(tuples);
+
+ /* estimate on the upper limit of cache entries we can hold at once */
+ est_cache_entries = floor(hash_mem_bytes / est_entry_bytes);
+
+ /* estimate on the distinct number of parameter values */
+ ndistinct = estimate_num_groups(root, mpath->param_exprs, calls, NULL,
+ &estinfo);
+
+ /*
+ * When the estimation fell back on using a default value, it's a bit too
+ * risky to assume that it's ok to use a Memoize node. The use of a
+ * default could cause us to use a Memoize node when it's really
+ * inappropriate to do so. If we see that this has been done, then we'll
+ * assume that every call will have unique parameters, which will almost
+ * certainly mean a MemoizePath will never survive add_path().
+ */
+ if ((estinfo.flags & SELFLAG_USED_DEFAULT) != 0)
+ ndistinct = calls;
+
+ /*
+ * Since we've already estimated the maximum number of entries we can
+ * store at once and know the estimated number of distinct values we'll be
+ * called with, we'll take this opportunity to set the path's est_entries.
+ * This will ultimately determine the hash table size that the executor
+ * will use. If we leave this at zero, the executor will just choose the
+ * size itself. Really this is not the right place to do this, but it's
+ * convenient since everything is already calculated.
+ */
+ mpath->est_entries = Min(Min(ndistinct, est_cache_entries),
+ PG_UINT32_MAX);
+
+ /*
+ * When the number of distinct parameter values is above the amount we can
+ * store in the cache, then we'll have to evict some entries from the
+ * cache. This is not free. Here we estimate how often we'll incur the
+ * cost of that eviction.
+ */
+ evict_ratio = 1.0 - Min(est_cache_entries, ndistinct) / ndistinct;
+
+ /*
+ * In order to estimate how costly a single scan will be, we need to
+ * attempt to estimate what the cache hit ratio will be. To do that we
+ * must look at how many scans are estimated in total for this node and
+ * how many of those scans we expect to get a cache hit.
+ */
+ hit_ratio = 1.0 / ndistinct * Min(est_cache_entries, ndistinct) -
+ (ndistinct / calls);
+
+ /* Ensure we don't go negative */
+ hit_ratio = Max(hit_ratio, 0.0);
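+ /*
+ * Worked example with hypothetical estimates: calls = 1000, ndistinct =
+ * 100, est_cache_entries = 500. Then evict_ratio = 0 (everything fits),
+ * and hit_ratio = (1/100) * 100 - (100/1000) = 0.9, so the rescan below is
+ * costed at roughly 10% of the subpath's cost plus cache overhead.
+ */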
+
+ /*
+ * Set the total_cost accounting for the expected cache hit ratio. We
+ * also add on a cpu_operator_cost to account for a cache lookup. This
+ * will happen regardless of whether it's a cache hit or not.
+ */
+ total_cost = input_total_cost * (1.0 - hit_ratio) + cpu_operator_cost;
+
+ /* Now adjust the total cost to account for cache evictions */
+
+ /* Charge a cpu_tuple_cost for evicting the actual cache entry */
+ total_cost += cpu_tuple_cost * evict_ratio;
+
+ /*
+ * Charge a 10th of cpu_operator_cost to evict every tuple in that entry.
+ * The per-tuple eviction is really just a pfree, so charging a whole
+ * cpu_operator_cost seems a little excessive.
+ */
+ total_cost += cpu_operator_cost / 10.0 * evict_ratio * tuples;
+
+ /*
+ * Now adjust for storing things in the cache, since that's not free
+ * either. Everything must go in the cache. We don't proportion this
+ * over any ratio, just apply it once for the scan. We charge a
+ * cpu_tuple_cost for the creation of the cache entry and also a
+ * cpu_operator_cost for each tuple we expect to cache.
+ */
+ total_cost += cpu_tuple_cost + cpu_operator_cost * tuples;
+
+ /*
+ * Getting the first row must also be proportioned according to the
+ * expected cache hit ratio.
+ */
+ startup_cost = input_startup_cost * (1.0 - hit_ratio);
+
+ /*
+ * Additionally we charge a cpu_tuple_cost to account for cache lookups,
+ * which we'll do regardless of whether it was a cache hit or not.
+ */
+ startup_cost += cpu_tuple_cost;
+
+ *rescan_startup_cost = startup_cost;
+ *rescan_total_cost = total_cost;
+}
+
+/*
+ * cost_agg
+ * Determines and returns the cost of performing an Agg plan node,
+ * including the cost of its input.
+ *
+ * aggcosts can be NULL when there are no actual aggregate functions (i.e.,
+ * we are using a hashed Agg node just to do grouping).
+ *
+ * Note: when aggstrategy == AGG_SORTED, caller must ensure that input costs
+ * are for appropriately-sorted input.
+ */
+void
+cost_agg(Path *path, PlannerInfo *root,
+ AggStrategy aggstrategy, const AggClauseCosts *aggcosts,
+ int numGroupCols, double numGroups,
+ List *quals,
+ Cost input_startup_cost, Cost input_total_cost,
+ double input_tuples, double input_width)
+{
+ double output_tuples;
+ Cost startup_cost;
+ Cost total_cost;
+ AggClauseCosts dummy_aggcosts;
+
+ /* Use all-zero per-aggregate costs if NULL is passed */
+ if (aggcosts == NULL)
+ {
+ Assert(aggstrategy == AGG_HASHED);
+ MemSet(&dummy_aggcosts, 0, sizeof(AggClauseCosts));
+ aggcosts = &dummy_aggcosts;
+ }
+
+ /*
+ * The transCost.per_tuple component of aggcosts should be charged once
+ * per input tuple, corresponding to the costs of evaluating the aggregate
+ * transfns and their input expressions. The finalCost.per_tuple component
+ * is charged once per output tuple, corresponding to the costs of
+ * evaluating the finalfns. Startup costs are of course charged but once.
+ *
+ * If we are grouping, we charge an additional cpu_operator_cost per
+ * grouping column per input tuple for grouping comparisons.
+ *
+ * We will produce a single output tuple if not grouping, and a tuple per
+ * group otherwise. We charge cpu_tuple_cost for each output tuple.
+ *
+ * Note: in this cost model, AGG_SORTED and AGG_HASHED have exactly the
+ * same total CPU cost, but AGG_SORTED has lower startup cost. If the
+ * input path is already sorted appropriately, AGG_SORTED should be
+ * preferred (since it has no risk of memory overflow). This will happen
+ * as long as the computed total costs are indeed exactly equal --- but if
+ * there's roundoff error we might do the wrong thing. So be sure that
+ * the computations below form the same intermediate values in the same
+ * order.
+ */
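+ /*
+ * As a hypothetical illustration: grouping 1,000,000 input tuples on two
+ * columns into 1,000 groups adds (cpu_operator_cost * 2) * 1,000,000 for
+ * the grouping comparisons (or hash computations) plus cpu_tuple_cost *
+ * 1,000 for emitting the group rows, on top of the per-input transfn costs.
+ */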
+ if (aggstrategy == AGG_PLAIN)
+ {
+ startup_cost = input_total_cost;
+ startup_cost += aggcosts->transCost.startup;
+ startup_cost += aggcosts->transCost.per_tuple * input_tuples;
+ startup_cost += aggcosts->finalCost.startup;
+ startup_cost += aggcosts->finalCost.per_tuple;
+ /* we aren't grouping */
+ total_cost = startup_cost + cpu_tuple_cost;
+ output_tuples = 1;
+ }
+ else if (aggstrategy == AGG_SORTED || aggstrategy == AGG_MIXED)
+ {
+ /* Here we are able to deliver output on-the-fly */
+ startup_cost = input_startup_cost;
+ total_cost = input_total_cost;
+ if (aggstrategy == AGG_MIXED && !enable_hashagg)
+ {
+ startup_cost += disable_cost;
+ total_cost += disable_cost;
+ }
+ /* calcs phrased this way to match HASHED case, see note above */
+ total_cost += aggcosts->transCost.startup;
+ total_cost += aggcosts->transCost.per_tuple * input_tuples;
+ total_cost += (cpu_operator_cost * numGroupCols) * input_tuples;
+ total_cost += aggcosts->finalCost.startup;
+ total_cost += aggcosts->finalCost.per_tuple * numGroups;
+ total_cost += cpu_tuple_cost * numGroups;
+ output_tuples = numGroups;
+ }
+ else
+ {
+ /* must be AGG_HASHED */
+ startup_cost = input_total_cost;
+ if (!enable_hashagg)
+ startup_cost += disable_cost;
+ startup_cost += aggcosts->transCost.startup;
+ startup_cost += aggcosts->transCost.per_tuple * input_tuples;
+ /* cost of computing hash value */
+ startup_cost += (cpu_operator_cost * numGroupCols) * input_tuples;
+ startup_cost += aggcosts->finalCost.startup;
+
+ total_cost = startup_cost;
+ total_cost += aggcosts->finalCost.per_tuple * numGroups;
+ /* cost of retrieving from hash table */
+ total_cost += cpu_tuple_cost * numGroups;
+ output_tuples = numGroups;
+ }
+
+ /*
+ * Add the disk costs of hash aggregation that spills to disk.
+ *
+ * Groups that go into the hash table stay in memory until finalized, so
+ * spilling and reprocessing tuples doesn't incur additional invocations
+ * of transCost or finalCost. Furthermore, the computed hash value is
+ * stored with the spilled tuples, so we don't incur extra invocations of
+ * the hash function.
+ *
+ * Hash Agg begins returning tuples after the first batch is complete.
+ * Accrue writes (spilled tuples) to startup_cost and to total_cost;
+ * accrue reads only to total_cost.
+ */
+ if (aggstrategy == AGG_HASHED || aggstrategy == AGG_MIXED)
+ {
+ double pages;
+ double pages_written = 0.0;
+ double pages_read = 0.0;
+ double spill_cost;
+ double hashentrysize;
+ double nbatches;
+ Size mem_limit;
+ uint64 ngroups_limit;
+ int num_partitions;
+ int depth;
+
+ /*
+ * Estimate number of batches based on the computed limits. If less
+ * than or equal to one, all groups are expected to fit in memory;
+ * otherwise we expect to spill.
+ */
+ hashentrysize = hash_agg_entry_size(list_length(root->aggtransinfos),
+ input_width,
+ aggcosts->transitionSpace);
+ hash_agg_set_limits(hashentrysize, numGroups, 0, &mem_limit,
+ &ngroups_limit, &num_partitions);
+
+ nbatches = Max((numGroups * hashentrysize) / mem_limit,
+ numGroups / ngroups_limit);
+
+ nbatches = Max(ceil(nbatches), 1.0);
+ num_partitions = Max(num_partitions, 2);
+
+ /*
+ * The number of partitions can change at different levels of
+ * recursion; but for the purposes of this calculation assume it stays
+ * constant.
+ */
+ depth = ceil(log(nbatches) / log(num_partitions));
+
+ /*
+ * Estimate number of pages read and written. For each level of
+ * recursion, a tuple must be written and then later read.
+ */
+ pages = relation_byte_size(input_tuples, input_width) / BLCKSZ;
+ pages_written = pages_read = pages * depth;
+
+ /*
+ * HashAgg has somewhat worse IO behavior than Sort on typical
+ * hardware/OS combinations. Account for this with a generic penalty.
+ */
+ pages_read *= 2.0;
+ pages_written *= 2.0;
+
+ startup_cost += pages_written * random_page_cost;
+ total_cost += pages_written * random_page_cost;
+ total_cost += pages_read * seq_page_cost;
+
+ /* account for CPU cost of spilling a tuple and reading it back */
+ spill_cost = depth * input_tuples * 2.0 * cpu_tuple_cost;
+ startup_cost += spill_cost;
+ total_cost += spill_cost;
+ }
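+	/*
+	 * Illustrative example of the spill estimate above (made-up numbers):
+	 * nbatches = 16 with num_partitions = 4 gives depth =
+	 * ceil(log(16)/log(4)) = 2, so every input tuple is expected to be
+	 * spilled and re-read twice; with pages = 100 that means 400 pages
+	 * written and 400 pages read after the 2x penalty.
+	 */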
+
+ /*
+ * If there are quals (HAVING quals), account for their cost and
+ * selectivity.
+ */
+ if (quals)
+ {
+ QualCost qual_cost;
+
+ cost_qual_eval(&qual_cost, quals, root);
+ startup_cost += qual_cost.startup;
+ total_cost += qual_cost.startup + output_tuples * qual_cost.per_tuple;
+
+ output_tuples = clamp_row_est(output_tuples *
+ clauselist_selectivity(root,
+ quals,
+ 0,
+ JOIN_INNER,
+ NULL));
+ }
+
+ path->rows = output_tuples;
+ path->startup_cost = startup_cost;
+ path->total_cost = total_cost;
+}
+
+/*
+ * cost_windowagg
+ * Determines and returns the cost of performing a WindowAgg plan node,
+ * including the cost of its input.
+ *
+ * Input is assumed already properly sorted.
+ */
+void
+cost_windowagg(Path *path, PlannerInfo *root,
+ List *windowFuncs, int numPartCols, int numOrderCols,
+ Cost input_startup_cost, Cost input_total_cost,
+ double input_tuples)
+{
+ Cost startup_cost;
+ Cost total_cost;
+ ListCell *lc;
+
+ startup_cost = input_startup_cost;
+ total_cost = input_total_cost;
+
+ /*
+ * Window functions are assumed to cost their stated execution cost, plus
+ * the cost of evaluating their input expressions, per tuple. Since they
+ * may in fact evaluate their inputs at multiple rows during each cycle,
+ * this could be a drastic underestimate; but without a way to know how
+ * many rows the window function will fetch, it's hard to do better. In
+ * any case, it's a good estimate for all the built-in window functions,
+ * so we'll just do this for now.
+ */
+ foreach(lc, windowFuncs)
+ {
+ WindowFunc *wfunc = lfirst_node(WindowFunc, lc);
+ Cost wfunccost;
+ QualCost argcosts;
+
+ argcosts.startup = argcosts.per_tuple = 0;
+ add_function_cost(root, wfunc->winfnoid, (Node *) wfunc,
+ &argcosts);
+ startup_cost += argcosts.startup;
+ wfunccost = argcosts.per_tuple;
+
+ /* also add the input expressions' cost to per-input-row costs */
+ cost_qual_eval_node(&argcosts, (Node *) wfunc->args, root);
+ startup_cost += argcosts.startup;
+ wfunccost += argcosts.per_tuple;
+
+ /*
+ * Add the filter's cost to per-input-row costs. XXX We should reduce
+ * input expression costs according to filter selectivity.
+ */
+ cost_qual_eval_node(&argcosts, (Node *) wfunc->aggfilter, root);
+ startup_cost += argcosts.startup;
+ wfunccost += argcosts.per_tuple;
+
+ total_cost += wfunccost * input_tuples;
+ }
+
+ /*
+ * We also charge cpu_operator_cost per grouping column per tuple for
+ * grouping comparisons, plus cpu_tuple_cost per tuple for general
+ * overhead.
+ *
+ * XXX this neglects costs of spooling the data to disk when it overflows
+ * work_mem. Sooner or later that should get accounted for.
+ */
+ total_cost += cpu_operator_cost * (numPartCols + numOrderCols) * input_tuples;
+ total_cost += cpu_tuple_cost * input_tuples;
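+	/*
+	 * For example (hypothetical, default cost parameters): with one
+	 * PARTITION BY column and two ORDER BY columns, each input row adds
+	 * 3 * 0.0025 + 0.01 = 0.0175 of comparison and general overhead on top
+	 * of the per-row window function costs computed above.
+	 */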
+
+ path->rows = input_tuples;
+ path->startup_cost = startup_cost;
+ path->total_cost = total_cost;
+}
+
+/*
+ * cost_group
+ * Determines and returns the cost of performing a Group plan node,
+ * including the cost of its input.
+ *
+ * Note: caller must ensure that input costs are for appropriately-sorted
+ * input.
+ */
+void
+cost_group(Path *path, PlannerInfo *root,
+ int numGroupCols, double numGroups,
+ List *quals,
+ Cost input_startup_cost, Cost input_total_cost,
+ double input_tuples)
+{
+ double output_tuples;
+ Cost startup_cost;
+ Cost total_cost;
+
+ output_tuples = numGroups;
+ startup_cost = input_startup_cost;
+ total_cost = input_total_cost;
+
+	/*
+	 * Charge one cpu_operator_cost per comparison per input tuple.  We
+	 * assume every grouping column gets compared for most of the input
+	 * tuples.
+	 */
+ total_cost += cpu_operator_cost * input_tuples * numGroupCols;
+
+ /*
+ * If there are quals (HAVING quals), account for their cost and
+ * selectivity.
+ */
+ if (quals)
+ {
+ QualCost qual_cost;
+
+ cost_qual_eval(&qual_cost, quals, root);
+ startup_cost += qual_cost.startup;
+ total_cost += qual_cost.startup + output_tuples * qual_cost.per_tuple;
+
+ output_tuples = clamp_row_est(output_tuples *
+ clauselist_selectivity(root,
+ quals,
+ 0,
+ JOIN_INNER,
+ NULL));
+ }
+
+ path->rows = output_tuples;
+ path->startup_cost = startup_cost;
+ path->total_cost = total_cost;
+}
+
+/*
+ * initial_cost_nestloop
+ * Preliminary estimate of the cost of a nestloop join path.
+ *
+ * This must quickly produce lower-bound estimates of the path's startup and
+ * total costs. If we are unable to eliminate the proposed path from
+ * consideration using the lower bounds, final_cost_nestloop will be called
+ * to obtain the final estimates.
+ *
+ * The exact division of labor between this function and final_cost_nestloop
+ * is private to them, and represents a tradeoff between speed of the initial
+ * estimate and getting a tight lower bound. We choose to not examine the
+ * join quals here, since that's by far the most expensive part of the
+ * calculations. The end result is that CPU-cost considerations must be
+ * left for the second phase; and for SEMI/ANTI joins, we must also postpone
+ * incorporation of the inner path's run cost.
+ *
+ * 'workspace' is to be filled with startup_cost, total_cost, and perhaps
+ * other data to be used by final_cost_nestloop
+ * 'jointype' is the type of join to be performed
+ * 'outer_path' is the outer input to the join
+ * 'inner_path' is the inner input to the join
+ * 'extra' contains miscellaneous information about the join
+ */
+void
+initial_cost_nestloop(PlannerInfo *root, JoinCostWorkspace *workspace,
+ JoinType jointype,
+ Path *outer_path, Path *inner_path,
+ JoinPathExtraData *extra)
+{
+ Cost startup_cost = 0;
+ Cost run_cost = 0;
+ double outer_path_rows = outer_path->rows;
+ Cost inner_rescan_start_cost;
+ Cost inner_rescan_total_cost;
+ Cost inner_run_cost;
+ Cost inner_rescan_run_cost;
+
+ /* estimate costs to rescan the inner relation */
+ cost_rescan(root, inner_path,
+ &inner_rescan_start_cost,
+ &inner_rescan_total_cost);
+
+ /* cost of source data */
+
+ /*
+ * NOTE: clearly, we must pay both outer and inner paths' startup_cost
+ * before we can start returning tuples, so the join's startup cost is
+ * their sum. We'll also pay the inner path's rescan startup cost
+ * multiple times.
+ */
+ startup_cost += outer_path->startup_cost + inner_path->startup_cost;
+ run_cost += outer_path->total_cost - outer_path->startup_cost;
+ if (outer_path_rows > 1)
+ run_cost += (outer_path_rows - 1) * inner_rescan_start_cost;
+
+ inner_run_cost = inner_path->total_cost - inner_path->startup_cost;
+ inner_rescan_run_cost = inner_rescan_total_cost - inner_rescan_start_cost;
+
+ if (jointype == JOIN_SEMI || jointype == JOIN_ANTI ||
+ extra->inner_unique)
+ {
+ /*
+ * With a SEMI or ANTI join, or if the innerrel is known unique, the
+ * executor will stop after the first match.
+ *
+ * Getting decent estimates requires inspection of the join quals,
+ * which we choose to postpone to final_cost_nestloop.
+ */
+
+ /* Save private data for final_cost_nestloop */
+ workspace->inner_run_cost = inner_run_cost;
+ workspace->inner_rescan_run_cost = inner_rescan_run_cost;
+ }
+ else
+ {
+ /* Normal case; we'll scan whole input rel for each outer row */
+ run_cost += inner_run_cost;
+ if (outer_path_rows > 1)
+ run_cost += (outer_path_rows - 1) * inner_rescan_run_cost;
+ }
+
+ /* CPU costs left for later */
+
+ /* Public result fields */
+ workspace->startup_cost = startup_cost;
+ workspace->total_cost = startup_cost + run_cost;
+ /* Save private data for final_cost_nestloop */
+ workspace->run_cost = run_cost;
+}
+
+/*
+ * final_cost_nestloop
+ * Final estimate of the cost and result size of a nestloop join path.
+ *
+ * 'path' is already filled in except for the rows and cost fields
+ * 'workspace' is the result from initial_cost_nestloop
+ * 'extra' contains miscellaneous information about the join
+ */
+void
+final_cost_nestloop(PlannerInfo *root, NestPath *path,
+ JoinCostWorkspace *workspace,
+ JoinPathExtraData *extra)
+{
+ Path *outer_path = path->jpath.outerjoinpath;
+ Path *inner_path = path->jpath.innerjoinpath;
+ double outer_path_rows = outer_path->rows;
+ double inner_path_rows = inner_path->rows;
+ Cost startup_cost = workspace->startup_cost;
+ Cost run_cost = workspace->run_cost;
+ Cost cpu_per_tuple;
+ QualCost restrict_qual_cost;
+ double ntuples;
+
+ /* Protect some assumptions below that rowcounts aren't zero */
+ if (outer_path_rows <= 0)
+ outer_path_rows = 1;
+ if (inner_path_rows <= 0)
+ inner_path_rows = 1;
+ /* Mark the path with the correct row estimate */
+ if (path->jpath.path.param_info)
+ path->jpath.path.rows = path->jpath.path.param_info->ppi_rows;
+ else
+ path->jpath.path.rows = path->jpath.path.parent->rows;
+
+ /* For partial paths, scale row estimate. */
+ if (path->jpath.path.parallel_workers > 0)
+ {
+ double parallel_divisor = get_parallel_divisor(&path->jpath.path);
+
+ path->jpath.path.rows =
+ clamp_row_est(path->jpath.path.rows / parallel_divisor);
+ }
+
+ /*
+ * We could include disable_cost in the preliminary estimate, but that
+ * would amount to optimizing for the case where the join method is
+ * disabled, which doesn't seem like the way to bet.
+ */
+ if (!enable_nestloop)
+ startup_cost += disable_cost;
+
+ /* cost of inner-relation source data (we already dealt with outer rel) */
+
+ if (path->jpath.jointype == JOIN_SEMI || path->jpath.jointype == JOIN_ANTI ||
+ extra->inner_unique)
+ {
+ /*
+ * With a SEMI or ANTI join, or if the innerrel is known unique, the
+ * executor will stop after the first match.
+ */
+ Cost inner_run_cost = workspace->inner_run_cost;
+ Cost inner_rescan_run_cost = workspace->inner_rescan_run_cost;
+ double outer_matched_rows;
+ double outer_unmatched_rows;
+ Selectivity inner_scan_frac;
+
+ /*
+ * For an outer-rel row that has at least one match, we can expect the
+ * inner scan to stop after a fraction 1/(match_count+1) of the inner
+ * rows, if the matches are evenly distributed. Since they probably
+ * aren't quite evenly distributed, we apply a fuzz factor of 2.0 to
+ * that fraction. (If we used a larger fuzz factor, we'd have to
+ * clamp inner_scan_frac to at most 1.0; but since match_count is at
+ * least 1, no such clamp is needed now.)
+ */
+ outer_matched_rows = rint(outer_path_rows * extra->semifactors.outer_match_frac);
+ outer_unmatched_rows = outer_path_rows - outer_matched_rows;
+ inner_scan_frac = 2.0 / (extra->semifactors.match_count + 1.0);
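+		/*
+		 * For example (hypothetical), with match_count = 3 the fraction is
+		 * 2.0 / 4.0 = 0.5, i.e. each matched outer row is expected to scan
+		 * about half of the inner rows before finding its first match.
+		 */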
+
+ /*
+ * Compute number of tuples processed (not number emitted!). First,
+ * account for successfully-matched outer rows.
+ */
+ ntuples = outer_matched_rows * inner_path_rows * inner_scan_frac;
+
+ /*
+ * Now we need to estimate the actual costs of scanning the inner
+ * relation, which may be quite a bit less than N times inner_run_cost
+ * due to early scan stops. We consider two cases. If the inner path
+ * is an indexscan using all the joinquals as indexquals, then an
+ * unmatched outer row results in an indexscan returning no rows,
+ * which is probably quite cheap. Otherwise, the executor will have
+ * to scan the whole inner rel for an unmatched row; not so cheap.
+ */
+ if (has_indexed_join_quals(path))
+ {
+ /*
+ * Successfully-matched outer rows will only require scanning
+ * inner_scan_frac of the inner relation. In this case, we don't
+ * need to charge the full inner_run_cost even when that's more
+ * than inner_rescan_run_cost, because we can assume that none of
+ * the inner scans ever scan the whole inner relation. So it's
+ * okay to assume that all the inner scan executions can be
+ * fractions of the full cost, even if materialization is reducing
+ * the rescan cost. At this writing, it's impossible to get here
+ * for a materialized inner scan, so inner_run_cost and
+ * inner_rescan_run_cost will be the same anyway; but just in
+ * case, use inner_run_cost for the first matched tuple and
+ * inner_rescan_run_cost for additional ones.
+ */
+ run_cost += inner_run_cost * inner_scan_frac;
+ if (outer_matched_rows > 1)
+ run_cost += (outer_matched_rows - 1) * inner_rescan_run_cost * inner_scan_frac;
+
+ /*
+ * Add the cost of inner-scan executions for unmatched outer rows.
+ * We estimate this as the same cost as returning the first tuple
+ * of a nonempty scan. We consider that these are all rescans,
+ * since we used inner_run_cost once already.
+ */
+ run_cost += outer_unmatched_rows *
+ inner_rescan_run_cost / inner_path_rows;
+
+ /*
+ * We won't be evaluating any quals at all for unmatched rows, so
+ * don't add them to ntuples.
+ */
+ }
+ else
+ {
+ /*
+ * Here, a complicating factor is that rescans may be cheaper than
+ * first scans. If we never scan all the way to the end of the
+ * inner rel, it might be (depending on the plan type) that we'd
+ * never pay the whole inner first-scan run cost. However it is
+ * difficult to estimate whether that will happen (and it could
+ * not happen if there are any unmatched outer rows!), so be
+ * conservative and always charge the whole first-scan cost once.
+ * We consider this charge to correspond to the first unmatched
+ * outer row, unless there isn't one in our estimate, in which
+ * case blame it on the first matched row.
+ */
+
+ /* First, count all unmatched join tuples as being processed */
+ ntuples += outer_unmatched_rows * inner_path_rows;
+
+ /* Now add the forced full scan, and decrement appropriate count */
+ run_cost += inner_run_cost;
+ if (outer_unmatched_rows >= 1)
+ outer_unmatched_rows -= 1;
+ else
+ outer_matched_rows -= 1;
+
+ /* Add inner run cost for additional outer tuples having matches */
+ if (outer_matched_rows > 0)
+ run_cost += outer_matched_rows * inner_rescan_run_cost * inner_scan_frac;
+
+ /* Add inner run cost for additional unmatched outer tuples */
+ if (outer_unmatched_rows > 0)
+ run_cost += outer_unmatched_rows * inner_rescan_run_cost;
+ }
+ }
+ else
+ {
+ /* Normal-case source costs were included in preliminary estimate */
+
+ /* Compute number of tuples processed (not number emitted!) */
+ ntuples = outer_path_rows * inner_path_rows;
+ }
+
+ /* CPU costs */
+ cost_qual_eval(&restrict_qual_cost, path->jpath.joinrestrictinfo, root);
+ startup_cost += restrict_qual_cost.startup;
+ cpu_per_tuple = cpu_tuple_cost + restrict_qual_cost.per_tuple;
+ run_cost += cpu_per_tuple * ntuples;
+
+ /* tlist eval costs are paid per output row, not per tuple scanned */
+ startup_cost += path->jpath.path.pathtarget->cost.startup;
+ run_cost += path->jpath.path.pathtarget->cost.per_tuple * path->jpath.path.rows;
+
+ path->jpath.path.startup_cost = startup_cost;
+ path->jpath.path.total_cost = startup_cost + run_cost;
+}
+
+/*
+ * initial_cost_mergejoin
+ * Preliminary estimate of the cost of a mergejoin path.
+ *
+ * This must quickly produce lower-bound estimates of the path's startup and
+ * total costs. If we are unable to eliminate the proposed path from
+ * consideration using the lower bounds, final_cost_mergejoin will be called
+ * to obtain the final estimates.
+ *
+ * The exact division of labor between this function and final_cost_mergejoin
+ * is private to them, and represents a tradeoff between speed of the initial
+ * estimate and getting a tight lower bound. We choose to not examine the
+ * join quals here, except for obtaining the scan selectivity estimate which
+ * is really essential (but fortunately, use of caching keeps the cost of
+ * getting that down to something reasonable).
+ * We also assume that cost_sort is cheap enough to use here.
+ *
+ * 'workspace' is to be filled with startup_cost, total_cost, and perhaps
+ * other data to be used by final_cost_mergejoin
+ * 'jointype' is the type of join to be performed
+ * 'mergeclauses' is the list of joinclauses to be used as merge clauses
+ * 'outer_path' is the outer input to the join
+ * 'inner_path' is the inner input to the join
+ * 'outersortkeys' is the list of sort keys for the outer path
+ * 'innersortkeys' is the list of sort keys for the inner path
+ * 'extra' contains miscellaneous information about the join
+ *
+ * Note: outersortkeys and innersortkeys should be NIL if no explicit
+ * sort is needed because the respective source path is already ordered.
+ */
+void
+initial_cost_mergejoin(PlannerInfo *root, JoinCostWorkspace *workspace,
+ JoinType jointype,
+ List *mergeclauses,
+ Path *outer_path, Path *inner_path,
+ List *outersortkeys, List *innersortkeys,
+ JoinPathExtraData *extra)
+{
+ Cost startup_cost = 0;
+ Cost run_cost = 0;
+ double outer_path_rows = outer_path->rows;
+ double inner_path_rows = inner_path->rows;
+ Cost inner_run_cost;
+ double outer_rows,
+ inner_rows,
+ outer_skip_rows,
+ inner_skip_rows;
+ Selectivity outerstartsel,
+ outerendsel,
+ innerstartsel,
+ innerendsel;
+ Path sort_path; /* dummy for result of cost_sort */
+
+ /* Protect some assumptions below that rowcounts aren't zero */
+ if (outer_path_rows <= 0)
+ outer_path_rows = 1;
+ if (inner_path_rows <= 0)
+ inner_path_rows = 1;
+
+ /*
+ * A merge join will stop as soon as it exhausts either input stream
+ * (unless it's an outer join, in which case the outer side has to be
+ * scanned all the way anyway). Estimate fraction of the left and right
+ * inputs that will actually need to be scanned. Likewise, we can
+ * estimate the number of rows that will be skipped before the first join
+ * pair is found, which should be factored into startup cost. We use only
+ * the first (most significant) merge clause for this purpose. Since
+ * mergejoinscansel() is a fairly expensive computation, we cache the
+ * results in the merge clause RestrictInfo.
+ */
+ if (mergeclauses && jointype != JOIN_FULL)
+ {
+ RestrictInfo *firstclause = (RestrictInfo *) linitial(mergeclauses);
+ List *opathkeys;
+ List *ipathkeys;
+ PathKey *opathkey;
+ PathKey *ipathkey;
+ MergeScanSelCache *cache;
+
+ /* Get the input pathkeys to determine the sort-order details */
+ opathkeys = outersortkeys ? outersortkeys : outer_path->pathkeys;
+ ipathkeys = innersortkeys ? innersortkeys : inner_path->pathkeys;
+ Assert(opathkeys);
+ Assert(ipathkeys);
+ opathkey = (PathKey *) linitial(opathkeys);
+ ipathkey = (PathKey *) linitial(ipathkeys);
+ /* debugging check */
+ if (opathkey->pk_opfamily != ipathkey->pk_opfamily ||
+ opathkey->pk_eclass->ec_collation != ipathkey->pk_eclass->ec_collation ||
+ opathkey->pk_strategy != ipathkey->pk_strategy ||
+ opathkey->pk_nulls_first != ipathkey->pk_nulls_first)
+ elog(ERROR, "left and right pathkeys do not match in mergejoin");
+
+ /* Get the selectivity with caching */
+ cache = cached_scansel(root, firstclause, opathkey);
+
+ if (bms_is_subset(firstclause->left_relids,
+ outer_path->parent->relids))
+ {
+ /* left side of clause is outer */
+ outerstartsel = cache->leftstartsel;
+ outerendsel = cache->leftendsel;
+ innerstartsel = cache->rightstartsel;
+ innerendsel = cache->rightendsel;
+ }
+ else
+ {
+ /* left side of clause is inner */
+ outerstartsel = cache->rightstartsel;
+ outerendsel = cache->rightendsel;
+ innerstartsel = cache->leftstartsel;
+ innerendsel = cache->leftendsel;
+ }
+ if (jointype == JOIN_LEFT ||
+ jointype == JOIN_ANTI)
+ {
+ outerstartsel = 0.0;
+ outerendsel = 1.0;
+ }
+ else if (jointype == JOIN_RIGHT)
+ {
+ innerstartsel = 0.0;
+ innerendsel = 1.0;
+ }
+ }
+ else
+ {
+ /* cope with clauseless or full mergejoin */
+ outerstartsel = innerstartsel = 0.0;
+ outerendsel = innerendsel = 1.0;
+ }
+
+ /*
+ * Convert selectivities to row counts. We force outer_rows and
+ * inner_rows to be at least 1, but the skip_rows estimates can be zero.
+ */
+ outer_skip_rows = rint(outer_path_rows * outerstartsel);
+ inner_skip_rows = rint(inner_path_rows * innerstartsel);
+ outer_rows = clamp_row_est(outer_path_rows * outerendsel);
+ inner_rows = clamp_row_est(inner_path_rows * innerendsel);
+
+ Assert(outer_skip_rows <= outer_rows);
+ Assert(inner_skip_rows <= inner_rows);
+
+ /*
+ * Readjust scan selectivities to account for above rounding. This is
+ * normally an insignificant effect, but when there are only a few rows in
+ * the inputs, failing to do this makes for a large percentage error.
+ */
+ outerstartsel = outer_skip_rows / outer_path_rows;
+ innerstartsel = inner_skip_rows / inner_path_rows;
+ outerendsel = outer_rows / outer_path_rows;
+ innerendsel = inner_rows / inner_path_rows;
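+	/*
+	 * For example (hypothetical), if outer_path_rows = 10 and the cached
+	 * outerstartsel is 0.04, then outer_skip_rows = rint(0.4) = 0;
+	 * recomputing outerstartsel as 0/10 = 0 keeps the startup-cost fraction
+	 * consistent with the rounded row count instead of charging for 0.4 of
+	 * a row.
+	 */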
+
+ Assert(outerstartsel <= outerendsel);
+ Assert(innerstartsel <= innerendsel);
+
+ /* cost of source data */
+
+ if (outersortkeys) /* do we need to sort outer? */
+ {
+ cost_sort(&sort_path,
+ root,
+ outersortkeys,
+ outer_path->total_cost,
+ outer_path_rows,
+ outer_path->pathtarget->width,
+ 0.0,
+ work_mem,
+ -1.0);
+ startup_cost += sort_path.startup_cost;
+ startup_cost += (sort_path.total_cost - sort_path.startup_cost)
+ * outerstartsel;
+ run_cost += (sort_path.total_cost - sort_path.startup_cost)
+ * (outerendsel - outerstartsel);
+ }
+ else
+ {
+ startup_cost += outer_path->startup_cost;
+ startup_cost += (outer_path->total_cost - outer_path->startup_cost)
+ * outerstartsel;
+ run_cost += (outer_path->total_cost - outer_path->startup_cost)
+ * (outerendsel - outerstartsel);
+ }
+
+ if (innersortkeys) /* do we need to sort inner? */
+ {
+ cost_sort(&sort_path,
+ root,
+ innersortkeys,
+ inner_path->total_cost,
+ inner_path_rows,
+ inner_path->pathtarget->width,
+ 0.0,
+ work_mem,
+ -1.0);
+ startup_cost += sort_path.startup_cost;
+ startup_cost += (sort_path.total_cost - sort_path.startup_cost)
+ * innerstartsel;
+ inner_run_cost = (sort_path.total_cost - sort_path.startup_cost)
+ * (innerendsel - innerstartsel);
+ }
+ else
+ {
+ startup_cost += inner_path->startup_cost;
+ startup_cost += (inner_path->total_cost - inner_path->startup_cost)
+ * innerstartsel;
+ inner_run_cost = (inner_path->total_cost - inner_path->startup_cost)
+ * (innerendsel - innerstartsel);
+ }
+
+ /*
+ * We can't yet determine whether rescanning occurs, or whether
+ * materialization of the inner input should be done. The minimum
+ * possible inner input cost, regardless of rescan and materialization
+ * considerations, is inner_run_cost. We include that in
+ * workspace->total_cost, but not yet in run_cost.
+ */
+
+ /* CPU costs left for later */
+
+ /* Public result fields */
+ workspace->startup_cost = startup_cost;
+ workspace->total_cost = startup_cost + run_cost + inner_run_cost;
+ /* Save private data for final_cost_mergejoin */
+ workspace->run_cost = run_cost;
+ workspace->inner_run_cost = inner_run_cost;
+ workspace->outer_rows = outer_rows;
+ workspace->inner_rows = inner_rows;
+ workspace->outer_skip_rows = outer_skip_rows;
+ workspace->inner_skip_rows = inner_skip_rows;
+}
+
+/*
+ * final_cost_mergejoin
+ * Final estimate of the cost and result size of a mergejoin path.
+ *
+ * Unlike other costsize functions, this routine makes two actual decisions:
+ * whether the executor will need to do mark/restore, and whether we should
+ * materialize the inner path. It would be logically cleaner to build
+ * separate paths testing these alternatives, but that would require repeating
+ * most of the cost calculations, which are not all that cheap. Since the
+ * choice will not affect output pathkeys or startup cost, only total cost,
+ * there is no possibility of wanting to keep more than one path. So it seems
+ * best to make the decisions here and record them in the path's
+ * skip_mark_restore and materialize_inner fields.
+ *
+ * Mark/restore overhead is usually required, but can be skipped if we know
+ * that the executor need find only one match per outer tuple, and that the
+ * mergeclauses are sufficient to identify a match.
+ *
+ * We materialize the inner path if we need mark/restore and either the inner
+ * path can't support mark/restore, or it's cheaper to use an interposed
+ * Material node to handle mark/restore.
+ *
+ * 'path' is already filled in except for the rows and cost fields and
+ * skip_mark_restore and materialize_inner
+ * 'workspace' is the result from initial_cost_mergejoin
+ * 'extra' contains miscellaneous information about the join
+ */
+void
+final_cost_mergejoin(PlannerInfo *root, MergePath *path,
+ JoinCostWorkspace *workspace,
+ JoinPathExtraData *extra)
+{
+ Path *outer_path = path->jpath.outerjoinpath;
+ Path *inner_path = path->jpath.innerjoinpath;
+ double inner_path_rows = inner_path->rows;
+ List *mergeclauses = path->path_mergeclauses;
+ List *innersortkeys = path->innersortkeys;
+ Cost startup_cost = workspace->startup_cost;
+ Cost run_cost = workspace->run_cost;
+ Cost inner_run_cost = workspace->inner_run_cost;
+ double outer_rows = workspace->outer_rows;
+ double inner_rows = workspace->inner_rows;
+ double outer_skip_rows = workspace->outer_skip_rows;
+ double inner_skip_rows = workspace->inner_skip_rows;
+ Cost cpu_per_tuple,
+ bare_inner_cost,
+ mat_inner_cost;
+ QualCost merge_qual_cost;
+ QualCost qp_qual_cost;
+ double mergejointuples,
+ rescannedtuples;
+ double rescanratio;
+
+ /* Protect some assumptions below that rowcounts aren't zero */
+ if (inner_path_rows <= 0)
+ inner_path_rows = 1;
+
+ /* Mark the path with the correct row estimate */
+ if (path->jpath.path.param_info)
+ path->jpath.path.rows = path->jpath.path.param_info->ppi_rows;
+ else
+ path->jpath.path.rows = path->jpath.path.parent->rows;
+
+ /* For partial paths, scale row estimate. */
+ if (path->jpath.path.parallel_workers > 0)
+ {
+ double parallel_divisor = get_parallel_divisor(&path->jpath.path);
+
+ path->jpath.path.rows =
+ clamp_row_est(path->jpath.path.rows / parallel_divisor);
+ }
+
+ /*
+ * We could include disable_cost in the preliminary estimate, but that
+ * would amount to optimizing for the case where the join method is
+ * disabled, which doesn't seem like the way to bet.
+ */
+ if (!enable_mergejoin)
+ startup_cost += disable_cost;
+
+ /*
+ * Compute cost of the mergequals and qpquals (other restriction clauses)
+ * separately.
+ */
+ cost_qual_eval(&merge_qual_cost, mergeclauses, root);
+ cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo, root);
+ qp_qual_cost.startup -= merge_qual_cost.startup;
+ qp_qual_cost.per_tuple -= merge_qual_cost.per_tuple;
+
+ /*
+ * With a SEMI or ANTI join, or if the innerrel is known unique, the
+ * executor will stop scanning for matches after the first match. When
+ * all the joinclauses are merge clauses, this means we don't ever need to
+ * back up the merge, and so we can skip mark/restore overhead.
+ */
+ if ((path->jpath.jointype == JOIN_SEMI ||
+ path->jpath.jointype == JOIN_ANTI ||
+ extra->inner_unique) &&
+ (list_length(path->jpath.joinrestrictinfo) ==
+ list_length(path->path_mergeclauses)))
+ path->skip_mark_restore = true;
+ else
+ path->skip_mark_restore = false;
+
+ /*
+ * Get approx # tuples passing the mergequals. We use approx_tuple_count
+ * here because we need an estimate done with JOIN_INNER semantics.
+ */
+ mergejointuples = approx_tuple_count(root, &path->jpath, mergeclauses);
+
+ /*
+ * When there are equal merge keys in the outer relation, the mergejoin
+ * must rescan any matching tuples in the inner relation. This means
+ * re-fetching inner tuples; we have to estimate how often that happens.
+ *
+ * For regular inner and outer joins, the number of re-fetches can be
+ * estimated approximately as size of merge join output minus size of
+ * inner relation. Assume that the distinct key values are 1, 2, ..., and
+ * denote the number of values of each key in the outer relation as m1,
+ * m2, ...; in the inner relation, n1, n2, ... Then we have
+ *
+ * size of join = m1 * n1 + m2 * n2 + ...
+ *
+ * number of rescanned tuples = (m1 - 1) * n1 + (m2 - 1) * n2 + ... = m1 *
+ * n1 + m2 * n2 + ... - (n1 + n2 + ...) = size of join - size of inner
+ * relation
+ *
+ * This equation works correctly for outer tuples having no inner match
+ * (nk = 0), but not for inner tuples having no outer match (mk = 0); we
+ * are effectively subtracting those from the number of rescanned tuples,
+ * when we should not. Can we do better without expensive selectivity
+ * computations?
+ *
+ * The whole issue is moot if we are working from a unique-ified outer
+ * input, or if we know we don't need to mark/restore at all.
+ */
+ if (IsA(outer_path, UniquePath) || path->skip_mark_restore)
+ rescannedtuples = 0;
+ else
+ {
+ rescannedtuples = mergejointuples - inner_path_rows;
+ /* Must clamp because of possible underestimate */
+ if (rescannedtuples < 0)
+ rescannedtuples = 0;
+ }
+
+ /*
+ * We'll inflate various costs this much to account for rescanning. Note
+ * that this is to be multiplied by something involving inner_rows, or
+ * another number related to the portion of the inner rel we'll scan.
+ */
+ rescanratio = 1.0 + (rescannedtuples / inner_rows);
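+	/*
+	 * For example (hypothetical), if mergejointuples = 1500, inner_path_rows
+	 * = 1000, and inner_rows = 1000, then rescannedtuples = 500 and
+	 * rescanratio = 1.5: the inner-side costs below are inflated by 50% to
+	 * cover the re-fetches.
+	 */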
+
+ /*
+ * Decide whether we want to materialize the inner input to shield it from
+ * mark/restore and performing re-fetches. Our cost model for regular
+ * re-fetches is that a re-fetch costs the same as an original fetch,
+ * which is probably an overestimate; but on the other hand we ignore the
+ * bookkeeping costs of mark/restore. Not clear if it's worth developing
+ * a more refined model. So we just need to inflate the inner run cost by
+ * rescanratio.
+ */
+ bare_inner_cost = inner_run_cost * rescanratio;
+
+ /*
+ * When we interpose a Material node the re-fetch cost is assumed to be
+ * just cpu_operator_cost per tuple, independently of the underlying
+ * plan's cost; and we charge an extra cpu_operator_cost per original
+ * fetch as well. Note that we're assuming the materialize node will
+ * never spill to disk, since it only has to remember tuples back to the
+ * last mark. (If there are a huge number of duplicates, our other cost
+ * factors will make the path so expensive that it probably won't get
+ * chosen anyway.) So we don't use cost_rescan here.
+ *
+ * Note: keep this estimate in sync with create_mergejoin_plan's labeling
+ * of the generated Material node.
+ */
+ mat_inner_cost = inner_run_cost +
+ cpu_operator_cost * inner_rows * rescanratio;
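+	/*
+	 * Continuing the hypothetical example above: with inner_run_cost = 100,
+	 * inner_rows = 1000, rescanratio = 1.5, and the default cpu_operator_cost
+	 * of 0.0025, bare_inner_cost = 150 while mat_inner_cost = 100 +
+	 * 0.0025 * 1000 * 1.5 = 103.75, so materializing the inner side would
+	 * look cheaper here.
+	 */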
+
+ /*
+ * If we don't need mark/restore at all, we don't need materialization.
+ */
+ if (path->skip_mark_restore)
+ path->materialize_inner = false;
+
+ /*
+ * Prefer materializing if it looks cheaper, unless the user has asked to
+ * suppress materialization.
+ */
+ else if (enable_material && mat_inner_cost < bare_inner_cost)
+ path->materialize_inner = true;
+
+ /*
+ * Even if materializing doesn't look cheaper, we *must* do it if the
+ * inner path is to be used directly (without sorting) and it doesn't
+ * support mark/restore.
+ *
+ * Since the inner side must be ordered, and only Sorts and IndexScans can
+ * create order to begin with, and they both support mark/restore, you
+ * might think there's no problem --- but you'd be wrong. Nestloop and
+ * merge joins can *preserve* the order of their inputs, so they can be
+ * selected as the input of a mergejoin, and they don't support
+ * mark/restore at present.
+ *
+ * We don't test the value of enable_material here, because
+ * materialization is required for correctness in this case, and turning
+ * it off does not entitle us to deliver an invalid plan.
+ */
+ else if (innersortkeys == NIL &&
+ !ExecSupportsMarkRestore(inner_path))
+ path->materialize_inner = true;
+
+ /*
+ * Also, force materializing if the inner path is to be sorted and the
+ * sort is expected to spill to disk. This is because the final merge
+ * pass can be done on-the-fly if it doesn't have to support mark/restore.
+ * We don't try to adjust the cost estimates for this consideration,
+ * though.
+ *
+ * Since materialization is a performance optimization in this case,
+ * rather than necessary for correctness, we skip it if enable_material is
+ * off.
+ */
+ else if (enable_material && innersortkeys != NIL &&
+ relation_byte_size(inner_path_rows,
+ inner_path->pathtarget->width) >
+ (work_mem * 1024L))
+ path->materialize_inner = true;
+ else
+ path->materialize_inner = false;
+
+ /* Charge the right incremental cost for the chosen case */
+ if (path->materialize_inner)
+ run_cost += mat_inner_cost;
+ else
+ run_cost += bare_inner_cost;
+
+ /* CPU costs */
+
+ /*
+ * The number of tuple comparisons needed is approximately number of outer
+ * rows plus number of inner rows plus number of rescanned tuples (can we
+ * refine this?). At each one, we need to evaluate the mergejoin quals.
+ */
+ startup_cost += merge_qual_cost.startup;
+ startup_cost += merge_qual_cost.per_tuple *
+ (outer_skip_rows + inner_skip_rows * rescanratio);
+ run_cost += merge_qual_cost.per_tuple *
+ ((outer_rows - outer_skip_rows) +
+ (inner_rows - inner_skip_rows) * rescanratio);
+
+ /*
+ * For each tuple that gets through the mergejoin proper, we charge
+ * cpu_tuple_cost plus the cost of evaluating additional restriction
+ * clauses that are to be applied at the join. (This is pessimistic since
+ * not all of the quals may get evaluated at each tuple.)
+ *
+ * Note: we could adjust for SEMI/ANTI joins skipping some qual
+ * evaluations here, but it's probably not worth the trouble.
+ */
+ startup_cost += qp_qual_cost.startup;
+ cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
+ run_cost += cpu_per_tuple * mergejointuples;
+
+ /* tlist eval costs are paid per output row, not per tuple scanned */
+ startup_cost += path->jpath.path.pathtarget->cost.startup;
+ run_cost += path->jpath.path.pathtarget->cost.per_tuple * path->jpath.path.rows;
+
+ path->jpath.path.startup_cost = startup_cost;
+ path->jpath.path.total_cost = startup_cost + run_cost;
+}
+
+/*
+ * run mergejoinscansel() with caching
+ */
+static MergeScanSelCache *
+cached_scansel(PlannerInfo *root, RestrictInfo *rinfo, PathKey *pathkey)
+{
+ MergeScanSelCache *cache;
+ ListCell *lc;
+ Selectivity leftstartsel,
+ leftendsel,
+ rightstartsel,
+ rightendsel;
+ MemoryContext oldcontext;
+
+ /* Do we have this result already? */
+ foreach(lc, rinfo->scansel_cache)
+ {
+ cache = (MergeScanSelCache *) lfirst(lc);
+ if (cache->opfamily == pathkey->pk_opfamily &&
+ cache->collation == pathkey->pk_eclass->ec_collation &&
+ cache->strategy == pathkey->pk_strategy &&
+ cache->nulls_first == pathkey->pk_nulls_first)
+ return cache;
+ }
+
+ /* Nope, do the computation */
+ mergejoinscansel(root,
+ (Node *) rinfo->clause,
+ pathkey->pk_opfamily,
+ pathkey->pk_strategy,
+ pathkey->pk_nulls_first,
+ &leftstartsel,
+ &leftendsel,
+ &rightstartsel,
+ &rightendsel);
+
+ /* Cache the result in suitably long-lived workspace */
+ oldcontext = MemoryContextSwitchTo(root->planner_cxt);
+
+ cache = (MergeScanSelCache *) palloc(sizeof(MergeScanSelCache));
+ cache->opfamily = pathkey->pk_opfamily;
+ cache->collation = pathkey->pk_eclass->ec_collation;
+ cache->strategy = pathkey->pk_strategy;
+ cache->nulls_first = pathkey->pk_nulls_first;
+ cache->leftstartsel = leftstartsel;
+ cache->leftendsel = leftendsel;
+ cache->rightstartsel = rightstartsel;
+ cache->rightendsel = rightendsel;
+
+ rinfo->scansel_cache = lappend(rinfo->scansel_cache, cache);
+
+ MemoryContextSwitchTo(oldcontext);
+
+ return cache;
+}
+
+/*
+ * initial_cost_hashjoin
+ * Preliminary estimate of the cost of a hashjoin path.
+ *
+ * This must quickly produce lower-bound estimates of the path's startup and
+ * total costs. If we are unable to eliminate the proposed path from
+ * consideration using the lower bounds, final_cost_hashjoin will be called
+ * to obtain the final estimates.
+ *
+ * The exact division of labor between this function and final_cost_hashjoin
+ * is private to them, and represents a tradeoff between speed of the initial
+ * estimate and getting a tight lower bound. We choose to not examine the
+ * join quals here (other than by counting the number of hash clauses),
+ * so we can't do much with CPU costs. We do assume that
+ * ExecChooseHashTableSize is cheap enough to use here.
+ *
+ * 'workspace' is to be filled with startup_cost, total_cost, and perhaps
+ * other data to be used by final_cost_hashjoin
+ * 'jointype' is the type of join to be performed
+ * 'hashclauses' is the list of joinclauses to be used as hash clauses
+ * 'outer_path' is the outer input to the join
+ * 'inner_path' is the inner input to the join
+ * 'extra' contains miscellaneous information about the join
+ * 'parallel_hash' indicates that inner_path is partial and that a shared
+ * hash table will be built in parallel
+ */
+void
+initial_cost_hashjoin(PlannerInfo *root, JoinCostWorkspace *workspace,
+ JoinType jointype,
+ List *hashclauses,
+ Path *outer_path, Path *inner_path,
+ JoinPathExtraData *extra,
+ bool parallel_hash)
+{
+ Cost startup_cost = 0;
+ Cost run_cost = 0;
+ double outer_path_rows = outer_path->rows;
+ double inner_path_rows = inner_path->rows;
+ double inner_path_rows_total = inner_path_rows;
+ int num_hashclauses = list_length(hashclauses);
+ int numbuckets;
+ int numbatches;
+ int num_skew_mcvs;
+ size_t space_allowed; /* unused */
+
+ /* cost of source data */
+ startup_cost += outer_path->startup_cost;
+ run_cost += outer_path->total_cost - outer_path->startup_cost;
+ startup_cost += inner_path->total_cost;
+
+ /*
+ * Cost of computing hash function: must do it once per input tuple. We
+ * charge one cpu_operator_cost for each column's hash function. Also,
+ * tack on one cpu_tuple_cost per inner row, to model the costs of
+ * inserting the row into the hashtable.
+ *
+ * XXX when a hashclause is more complex than a single operator, we really
+ * should charge the extra eval costs of the left or right side, as
+ * appropriate, here. This seems more work than it's worth at the moment.
+ */
+ startup_cost += (cpu_operator_cost * num_hashclauses + cpu_tuple_cost)
+ * inner_path_rows;
+ run_cost += cpu_operator_cost * num_hashclauses * outer_path_rows;
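+	/*
+	 * For example (hypothetical, default cost parameters): with two hash
+	 * clauses, each inner row adds 2 * 0.0025 + 0.01 = 0.015 to startup cost
+	 * (hashing plus hashtable insertion) and each outer row adds 0.005 to
+	 * run cost for computing its hash value.
+	 */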
+
+ /*
+ * If this is a parallel hash build, then the value we have for
+ * inner_rows_total currently refers only to the rows returned by each
+ * participant. For shared hash table size estimation, we need the total
+ * number, so we need to undo the division.
+ */
+ if (parallel_hash)
+ inner_path_rows_total *= get_parallel_divisor(inner_path);
+
+ /*
+ * Get hash table size that executor would use for inner relation.
+ *
+ * XXX for the moment, always assume that skew optimization will be
+ * performed. As long as SKEW_HASH_MEM_PERCENT is small, it's not worth
+ * trying to determine that for sure.
+ *
+ * XXX at some point it might be interesting to try to account for skew
+ * optimization in the cost estimate, but for now, we don't.
+ */
+ ExecChooseHashTableSize(inner_path_rows_total,
+ inner_path->pathtarget->width,
+ true, /* useskew */
+ parallel_hash, /* try_combined_hash_mem */
+ outer_path->parallel_workers,
+ &space_allowed,
+ &numbuckets,
+ &numbatches,
+ &num_skew_mcvs);
+
+ /*
+ * If inner relation is too big then we will need to "batch" the join,
+ * which implies writing and reading most of the tuples to disk an extra
+ * time. Charge seq_page_cost per page, since the I/O should be nice and
+ * sequential. Writing the inner rel counts as startup cost, all the rest
+ * as run cost.
+ */
+ if (numbatches > 1)
+ {
+ double outerpages = page_size(outer_path_rows,
+ outer_path->pathtarget->width);
+ double innerpages = page_size(inner_path_rows,
+ inner_path->pathtarget->width);
+
+ startup_cost += seq_page_cost * innerpages;
+ run_cost += seq_page_cost * (innerpages + 2 * outerpages);
+ }
+
+ /* CPU costs left for later */
+
+ /* Public result fields */
+ workspace->startup_cost = startup_cost;
+ workspace->total_cost = startup_cost + run_cost;
+ /* Save private data for final_cost_hashjoin */
+ workspace->run_cost = run_cost;
+ workspace->numbuckets = numbuckets;
+ workspace->numbatches = numbatches;
+ workspace->inner_rows_total = inner_path_rows_total;
+}
+
+/*
+ * final_cost_hashjoin
+ * Final estimate of the cost and result size of a hashjoin path.
+ *
+ * Note: the numbatches estimate is also saved into 'path' for use later
+ *
+ * 'path' is already filled in except for the rows and cost fields and
+ * num_batches
+ * 'workspace' is the result from initial_cost_hashjoin
+ * 'extra' contains miscellaneous information about the join
+ */
+void
+final_cost_hashjoin(PlannerInfo *root, HashPath *path,
+ JoinCostWorkspace *workspace,
+ JoinPathExtraData *extra)
+{
+ Path *outer_path = path->jpath.outerjoinpath;
+ Path *inner_path = path->jpath.innerjoinpath;
+ double outer_path_rows = outer_path->rows;
+ double inner_path_rows = inner_path->rows;
+ double inner_path_rows_total = workspace->inner_rows_total;
+ List *hashclauses = path->path_hashclauses;
+ Cost startup_cost = workspace->startup_cost;
+ Cost run_cost = workspace->run_cost;
+ int numbuckets = workspace->numbuckets;
+ int numbatches = workspace->numbatches;
+ Cost cpu_per_tuple;
+ QualCost hash_qual_cost;
+ QualCost qp_qual_cost;
+ double hashjointuples;
+ double virtualbuckets;
+ Selectivity innerbucketsize;
+ Selectivity innermcvfreq;
+ ListCell *hcl;
+
+ /* Mark the path with the correct row estimate */
+ if (path->jpath.path.param_info)
+ path->jpath.path.rows = path->jpath.path.param_info->ppi_rows;
+ else
+ path->jpath.path.rows = path->jpath.path.parent->rows;
+
+ /* For partial paths, scale row estimate. */
+ if (path->jpath.path.parallel_workers > 0)
+ {
+ double parallel_divisor = get_parallel_divisor(&path->jpath.path);
+
+ path->jpath.path.rows =
+ clamp_row_est(path->jpath.path.rows / parallel_divisor);
+ }
+
+ /*
+ * We could include disable_cost in the preliminary estimate, but that
+ * would amount to optimizing for the case where the join method is
+ * disabled, which doesn't seem like the way to bet.
+ */
+ if (!enable_hashjoin)
+ startup_cost += disable_cost;
+
+ /* mark the path with estimated # of batches */
+ path->num_batches = numbatches;
+
+ /* store the total number of tuples (sum of partial row estimates) */
+ path->inner_rows_total = inner_path_rows_total;
+
+ /* and compute the number of "virtual" buckets in the whole join */
+ virtualbuckets = (double) numbuckets * (double) numbatches;
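+	/*
+	 * For example (hypothetical), numbuckets = 1024 and numbatches = 4 give
+	 * 4096 virtual buckets, so the bucket-size fractions below are measured
+	 * against the join as a whole rather than against a single batch.
+	 */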
+
+ /*
+ * Determine bucketsize fraction and MCV frequency for the inner relation.
+ * We use the smallest bucketsize or MCV frequency estimated for any
+ * individual hashclause; this is undoubtedly conservative.
+ *
+ * BUT: if inner relation has been unique-ified, we can assume it's good
+ * for hashing. This is important both because it's the right answer, and
+ * because we avoid contaminating the cache with a value that's wrong for
+ * non-unique-ified paths.
+ */
+ if (IsA(inner_path, UniquePath))
+ {
+ innerbucketsize = 1.0 / virtualbuckets;
+ innermcvfreq = 0.0;
+ }
+ else
+ {
+ innerbucketsize = 1.0;
+ innermcvfreq = 1.0;
+ foreach(hcl, hashclauses)
+ {
+ RestrictInfo *restrictinfo = lfirst_node(RestrictInfo, hcl);
+ Selectivity thisbucketsize;
+ Selectivity thismcvfreq;
+
+ /*
+ * First we have to figure out which side of the hashjoin clause
+ * is the inner side.
+ *
+ * Since we tend to visit the same clauses over and over when
+ * planning a large query, we cache the bucket stats estimates in
+ * the RestrictInfo node to avoid repeated lookups of statistics.
+ */
+ if (bms_is_subset(restrictinfo->right_relids,
+ inner_path->parent->relids))
+ {
+ /* righthand side is inner */
+ thisbucketsize = restrictinfo->right_bucketsize;
+ if (thisbucketsize < 0)
+ {
+ /* not cached yet */
+ estimate_hash_bucket_stats(root,
+ get_rightop(restrictinfo->clause),
+ virtualbuckets,
+ &restrictinfo->right_mcvfreq,
+ &restrictinfo->right_bucketsize);
+ thisbucketsize = restrictinfo->right_bucketsize;
+ }
+ thismcvfreq = restrictinfo->right_mcvfreq;
+ }
+ else
+ {
+ Assert(bms_is_subset(restrictinfo->left_relids,
+ inner_path->parent->relids));
+ /* lefthand side is inner */
+ thisbucketsize = restrictinfo->left_bucketsize;
+ if (thisbucketsize < 0)
+ {
+ /* not cached yet */
+ estimate_hash_bucket_stats(root,
+ get_leftop(restrictinfo->clause),
+ virtualbuckets,
+ &restrictinfo->left_mcvfreq,
+ &restrictinfo->left_bucketsize);
+ thisbucketsize = restrictinfo->left_bucketsize;
+ }
+ thismcvfreq = restrictinfo->left_mcvfreq;
+ }
+
+ if (innerbucketsize > thisbucketsize)
+ innerbucketsize = thisbucketsize;
+ if (innermcvfreq > thismcvfreq)
+ innermcvfreq = thismcvfreq;
+ }
+ }
+
+ /*
+ * If the bucket holding the inner MCV would exceed hash_mem, we don't
+ * want to hash unless there is really no other alternative, so apply
+ * disable_cost. (The executor normally copes with excessive memory usage
+ * by splitting batches, but obviously it cannot separate equal values
+ * that way, so it will be unable to drive the batch size below hash_mem
+ * when this is true.)
+ */
+ if (relation_byte_size(clamp_row_est(inner_path_rows * innermcvfreq),
+ inner_path->pathtarget->width) > get_hash_memory_limit())
+ startup_cost += disable_cost;
+
+ /*
+ * Compute cost of the hashquals and qpquals (other restriction clauses)
+ * separately.
+ */
+ cost_qual_eval(&hash_qual_cost, hashclauses, root);
+ cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo, root);
+ qp_qual_cost.startup -= hash_qual_cost.startup;
+ qp_qual_cost.per_tuple -= hash_qual_cost.per_tuple;
+
+ /* CPU costs */
+
+ if (path->jpath.jointype == JOIN_SEMI ||
+ path->jpath.jointype == JOIN_ANTI ||
+ extra->inner_unique)
+ {
+ double outer_matched_rows;
+ Selectivity inner_scan_frac;
+
+ /*
+ * With a SEMI or ANTI join, or if the innerrel is known unique, the
+ * executor will stop after the first match.
+ *
+ * For an outer-rel row that has at least one match, we can expect the
+ * bucket scan to stop after a fraction 1/(match_count+1) of the
+ * bucket's rows, if the matches are evenly distributed. Since they
+ * probably aren't quite evenly distributed, we apply a fuzz factor of
+ * 2.0 to that fraction. (If we used a larger fuzz factor, we'd have
+ * to clamp inner_scan_frac to at most 1.0; but since match_count is
+ * at least 1, no such clamp is needed now.)
+ */
+ outer_matched_rows = rint(outer_path_rows * extra->semifactors.outer_match_frac);
+ inner_scan_frac = 2.0 / (extra->semifactors.match_count + 1.0);
+
+ startup_cost += hash_qual_cost.startup;
+ run_cost += hash_qual_cost.per_tuple * outer_matched_rows *
+ clamp_row_est(inner_path_rows * innerbucketsize * inner_scan_frac) * 0.5;
+
+ /*
+ * For unmatched outer-rel rows, the picture is quite a lot different.
+ * In the first place, there is no reason to assume that these rows
+ * preferentially hit heavily-populated buckets; instead assume they
+ * are uncorrelated with the inner distribution and so they see an
+ * average bucket size of inner_path_rows / virtualbuckets. In the
+ * second place, it seems likely that they will have few if any exact
+ * hash-code matches and so very few of the tuples in the bucket will
+ * actually require eval of the hash quals. We don't have any good
+ * way to estimate how many will, but for the moment assume that the
+ * effective cost per bucket entry is one-tenth what it is for
+ * matchable tuples.
+ */
+ run_cost += hash_qual_cost.per_tuple *
+ (outer_path_rows - outer_matched_rows) *
+ clamp_row_est(inner_path_rows / virtualbuckets) * 0.05;
+
+ /* Get # of tuples that will pass the basic join */
+ if (path->jpath.jointype == JOIN_ANTI)
+ hashjointuples = outer_path_rows - outer_matched_rows;
+ else
+ hashjointuples = outer_matched_rows;
+ }
+ else
+ {
+ /*
+ * The number of tuple comparisons needed is the number of outer
+ * tuples times the typical number of tuples in a hash bucket, which
+ * is the inner relation size times its bucketsize fraction. At each
+ * one, we need to evaluate the hashjoin quals. But actually,
+ * charging the full qual eval cost at each tuple is pessimistic,
+ * since we don't evaluate the quals unless the hash values match
+ * exactly. For lack of a better idea, halve the cost estimate to
+ * allow for that.
+ */
+ startup_cost += hash_qual_cost.startup;
+ run_cost += hash_qual_cost.per_tuple * outer_path_rows *
+ clamp_row_est(inner_path_rows * innerbucketsize) * 0.5;
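+		/*
+		 * For example (hypothetical): outer_path_rows = 10000,
+		 * inner_path_rows = 100000, and innerbucketsize = 0.0001 mean about
+		 * 10 inner tuples per probed bucket, so the hash quals are charged
+		 * for 10000 * 10 * 0.5 = 50000 evaluations.
+		 */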
+
+ /*
+ * Get approx # tuples passing the hashquals. We use
+ * approx_tuple_count here because we need an estimate done with
+ * JOIN_INNER semantics.
+ */
+ hashjointuples = approx_tuple_count(root, &path->jpath, hashclauses);
+ }
+
+ /*
+ * For each tuple that gets through the hashjoin proper, we charge
+ * cpu_tuple_cost plus the cost of evaluating additional restriction
+ * clauses that are to be applied at the join. (This is pessimistic since
+ * not all of the quals may get evaluated at each tuple.)
+ */
+ startup_cost += qp_qual_cost.startup;
+ cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
+ run_cost += cpu_per_tuple * hashjointuples;
+
+ /* tlist eval costs are paid per output row, not per tuple scanned */
+ startup_cost += path->jpath.path.pathtarget->cost.startup;
+ run_cost += path->jpath.path.pathtarget->cost.per_tuple * path->jpath.path.rows;
+
+ path->jpath.path.startup_cost = startup_cost;
+ path->jpath.path.total_cost = startup_cost + run_cost;
+}
+
+
+/*
+ * cost_subplan
+ * Figure the costs for a SubPlan (or initplan).
+ *
+ * Note: we could dig the subplan's Plan out of the root list, but in practice
+ * all callers have it handy already, so we make them pass it.
+ */
+void
+cost_subplan(PlannerInfo *root, SubPlan *subplan, Plan *plan)
+{
+ QualCost sp_cost;
+
+ /* Figure any cost for evaluating the testexpr */
+ cost_qual_eval(&sp_cost,
+ make_ands_implicit((Expr *) subplan->testexpr),
+ root);
+
+ if (subplan->useHashTable)
+ {
+ /*
+ * If we are using a hash table for the subquery outputs, then the
+ * cost of evaluating the query is a one-time cost. We charge one
+ * cpu_operator_cost per tuple for the work of loading the hashtable,
+ * too.
+ */
+ sp_cost.startup += plan->total_cost +
+ cpu_operator_cost * plan->plan_rows;
+
+ /*
+ * The per-tuple costs include the cost of evaluating the lefthand
+ * expressions, plus the cost of probing the hashtable. We already
+ * accounted for the lefthand expressions as part of the testexpr, and
+ * will also have counted one cpu_operator_cost for each comparison
+ * operator. That is probably too low for the probing cost, but it's
+ * hard to make a better estimate, so live with it for now.
+ */
+ }
+ else
+ {
+ /*
+ * Otherwise we will be rescanning the subplan output on each
+ * evaluation. We need to estimate how much of the output we will
+ * actually need to scan. NOTE: this logic should agree with the
+ * tuple_fraction estimates used by make_subplan() in
+ * plan/subselect.c.
+ */
+ Cost plan_run_cost = plan->total_cost - plan->startup_cost;
+
+ if (subplan->subLinkType == EXISTS_SUBLINK)
+ {
+ /* we only need to fetch 1 tuple; clamp to avoid zero divide */
+ sp_cost.per_tuple += plan_run_cost / clamp_row_est(plan->plan_rows);
+ }
+ else if (subplan->subLinkType == ALL_SUBLINK ||
+ subplan->subLinkType == ANY_SUBLINK)
+ {
+ /* assume we need 50% of the tuples */
+ sp_cost.per_tuple += 0.50 * plan_run_cost;
+ /* also charge a cpu_operator_cost per row examined */
+ sp_cost.per_tuple += 0.50 * plan->plan_rows * cpu_operator_cost;
+ }
+ else
+ {
+ /* assume we need all tuples */
+ sp_cost.per_tuple += plan_run_cost;
+ }
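+		/*
+		 * For example (hypothetical, default cpu_operator_cost): an ANY
+		 * sublink over a subplan with plan_run_cost = 200 and plan_rows =
+		 * 1000 adds 0.5 * 200 + 0.5 * 1000 * 0.0025 = 101.25 per call.
+		 */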
+
+ /*
+ * Also account for subplan's startup cost. If the subplan is
+ * uncorrelated or undirect correlated, AND its topmost node is one
+ * that materializes its output, assume that we'll only need to pay
+ * its startup cost once; otherwise assume we pay the startup cost
+ * every time.
+ */
+ if (subplan->parParam == NIL &&
+ ExecMaterializesOutput(nodeTag(plan)))
+ sp_cost.startup += plan->startup_cost;
+ else
+ sp_cost.per_tuple += plan->startup_cost;
+ }
+
+ subplan->startup_cost = sp_cost.startup;
+ subplan->per_call_cost = sp_cost.per_tuple;
+}
+
+
+/*
+ * cost_rescan
+ * Given a finished Path, estimate the costs of rescanning it after
+ * having done so the first time. For some Path types a rescan is
+ * cheaper than an original scan (if no parameters change), and this
+ * function embodies knowledge about that. The default is to return
+ * the same costs stored in the Path. (Note that the cost estimates
+ * actually stored in Paths are always for first scans.)
+ *
+ * This function is not currently intended to model effects such as rescans
+ * being cheaper due to disk block caching; what we are concerned with is
+ * plan types wherein the executor caches results explicitly, or doesn't
+ * redo startup calculations, etc.
+ */
+static void
+cost_rescan(PlannerInfo *root, Path *path,
+ Cost *rescan_startup_cost, /* output parameters */
+ Cost *rescan_total_cost)
+{
+ switch (path->pathtype)
+ {
+ case T_FunctionScan:
+
+ /*
+ * Currently, nodeFunctionscan.c always executes the function to
+ * completion before returning any rows, and caches the results in
+ * a tuplestore. So the function eval cost is all startup cost
+ * and isn't paid over again on rescans. However, all run costs
+ * will be paid over again.
+ */
+ *rescan_startup_cost = 0;
+ *rescan_total_cost = path->total_cost - path->startup_cost;
+ break;
+ case T_HashJoin:
+
+ /*
+ * If it's a single-batch join, we don't need to rebuild the hash
+ * table during a rescan.
+ */
+ if (((HashPath *) path)->num_batches == 1)
+ {
+ /* Startup cost is exactly the cost of hash table building */
+ *rescan_startup_cost = 0;
+ *rescan_total_cost = path->total_cost - path->startup_cost;
+ }
+ else
+ {
+ /* Otherwise, no special treatment */
+ *rescan_startup_cost = path->startup_cost;
+ *rescan_total_cost = path->total_cost;
+ }
+ break;
+ case T_CteScan:
+ case T_WorkTableScan:
+ {
+ /*
+ * These plan types materialize their final result in a
+ * tuplestore or tuplesort object. So the rescan cost is only
+ * cpu_tuple_cost per tuple, unless the result is large enough
+ * to spill to disk.
+ */
+ Cost run_cost = cpu_tuple_cost * path->rows;
+ double nbytes = relation_byte_size(path->rows,
+ path->pathtarget->width);
+ long work_mem_bytes = work_mem * 1024L;
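+				/* (work_mem is in kilobytes, hence the 1024 multiplier) */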
+
+ if (nbytes > work_mem_bytes)
+ {
+ /* It will spill, so account for re-read cost */
+ double npages = ceil(nbytes / BLCKSZ);
+
+ run_cost += seq_page_cost * npages;
+ }
+ *rescan_startup_cost = 0;
+ *rescan_total_cost = run_cost;
+ }
+ break;
+ case T_Material:
+ case T_Sort:
+ {
+ /*
+ * These plan types not only materialize their results, but do
+ * not implement qual filtering or projection. So they are
+ * even cheaper to rescan than the ones above. We charge only
+ * cpu_operator_cost per tuple. (Note: keep that in sync with
+ * the run_cost charge in cost_sort, and also see comments in
+ * cost_material before you change it.)
+ */
+ Cost run_cost = cpu_operator_cost * path->rows;
+ double nbytes = relation_byte_size(path->rows,
+ path->pathtarget->width);
+ long work_mem_bytes = work_mem * 1024L;
+
+ if (nbytes > work_mem_bytes)
+ {
+ /* It will spill, so account for re-read cost */
+ double npages = ceil(nbytes / BLCKSZ);
+
+ run_cost += seq_page_cost * npages;
+ }
+ *rescan_startup_cost = 0;
+ *rescan_total_cost = run_cost;
+ }
+ break;
+ case T_Memoize:
+ /* All the hard work is done by cost_memoize_rescan */
+ cost_memoize_rescan(root, (MemoizePath *) path,
+ rescan_startup_cost, rescan_total_cost);
+ break;
+ default:
+ *rescan_startup_cost = path->startup_cost;
+ *rescan_total_cost = path->total_cost;
+ break;
+ }
+}
+
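+/*
+ * Worked example for the Material/Sort branch above, assuming the default
+ * cost parameters (cpu_operator_cost = 0.0025, seq_page_cost = 1.0,
+ * work_mem = 4MB), 8kB blocks and 8-byte MAXALIGN:
+ *
+ *     rows = 1000000, width = 100
+ *     nbytes = 1000000 * (MAXALIGN(100) + MAXALIGN(23)) = 128000000
+ *     npages = ceil(128000000 / 8192)                   = 15625
+ *     rescan_total_cost = 0.0025 * 1000000 + 1.0 * 15625 = 18125
+ *
+ * The original sort or materialization cost is not paid again, which is
+ * what makes these nodes attractive inner paths for nestloop joins.
+ */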
+
+/*
+ * cost_qual_eval
+ * Estimate the CPU costs of evaluating a WHERE clause.
+ * The input can be either an implicitly-ANDed list of boolean
+ * expressions, or a list of RestrictInfo nodes. (The latter is
+ * preferred since it allows caching of the results.)
+ * The result includes both a one-time (startup) component,
+ * and a per-evaluation component.
+ */
+void
+cost_qual_eval(QualCost *cost, List *quals, PlannerInfo *root)
+{
+ cost_qual_eval_context context;
+ ListCell *l;
+
+ context.root = root;
+ context.total.startup = 0;
+ context.total.per_tuple = 0;
+
+ /* We don't charge any cost for the implicit ANDing at top level ... */
+
+ foreach(l, quals)
+ {
+ Node *qual = (Node *) lfirst(l);
+
+ cost_qual_eval_walker(qual, &context);
+ }
+
+ *cost = context.total;
+}
+
+/*
+ * cost_qual_eval_node
+ * As above, for a single RestrictInfo or expression.
+ */
+void
+cost_qual_eval_node(QualCost *cost, Node *qual, PlannerInfo *root)
+{
+ cost_qual_eval_context context;
+
+ context.root = root;
+ context.total.startup = 0;
+ context.total.per_tuple = 0;
+
+ cost_qual_eval_walker(qual, &context);
+
+ *cost = context.total;
+}
+
+static bool
+cost_qual_eval_walker(Node *node, cost_qual_eval_context *context)
+{
+ if (node == NULL)
+ return false;
+
+ /*
+ * RestrictInfo nodes contain an eval_cost field reserved for this
+ * routine's use, so that it's not necessary to evaluate the qual clause's
+ * cost more than once. If the clause's cost hasn't been computed yet,
+ * the field's startup value will contain -1.
+ */
+ if (IsA(node, RestrictInfo))
+ {
+ RestrictInfo *rinfo = (RestrictInfo *) node;
+
+ if (rinfo->eval_cost.startup < 0)
+ {
+ cost_qual_eval_context locContext;
+
+ locContext.root = context->root;
+ locContext.total.startup = 0;
+ locContext.total.per_tuple = 0;
+
+ /*
+ * For an OR clause, recurse into the marked-up tree so that we
+ * set the eval_cost for contained RestrictInfos too.
+ */
+ if (rinfo->orclause)
+ cost_qual_eval_walker((Node *) rinfo->orclause, &locContext);
+ else
+ cost_qual_eval_walker((Node *) rinfo->clause, &locContext);
+
+ /*
+ * If the RestrictInfo is marked pseudoconstant, it will be tested
+ * only once, so treat its cost as all startup cost.
+ */
+ if (rinfo->pseudoconstant)
+ {
+ /* count one execution during startup */
+ locContext.total.startup += locContext.total.per_tuple;
+ locContext.total.per_tuple = 0;
+ }
+ rinfo->eval_cost = locContext.total;
+ }
+ context->total.startup += rinfo->eval_cost.startup;
+ context->total.per_tuple += rinfo->eval_cost.per_tuple;
+ /* do NOT recurse into children */
+ return false;
+ }
+
+ /*
+ * For each operator or function node in the given tree, we charge the
+ * estimated execution cost given by pg_proc.procost (remember to multiply
+ * this by cpu_operator_cost).
+ *
+ * Vars and Consts are charged zero, and so are boolean operators (AND,
+ * OR, NOT). Simplistic, but a lot better than no model at all.
+ *
+ * Should we try to account for the possibility of short-circuit
+ * evaluation of AND/OR? Probably *not*, because that would make the
+ * results depend on the clause ordering, and we are not in any position
+ * to expect that the current ordering of the clauses is the one that's
+ * going to end up being used. The above per-RestrictInfo caching would
+ * not mix well with trying to re-order clauses anyway.
+ *
+ * Another issue that is entirely ignored here is that if a set-returning
+ * function is below top level in the tree, the functions/operators above
+ * it will need to be evaluated multiple times. In practical use, such
+ * cases arise so seldom as to not be worth the added complexity needed;
+ * moreover, since our rowcount estimates for functions tend to be pretty
+ * phony, the results would also be pretty phony.
+ */
+ if (IsA(node, FuncExpr))
+ {
+ add_function_cost(context->root, ((FuncExpr *) node)->funcid, node,
+ &context->total);
+ }
+ else if (IsA(node, OpExpr) ||
+ IsA(node, DistinctExpr) ||
+ IsA(node, NullIfExpr))
+ {
+ /* rely on struct equivalence to treat these all alike */
+ set_opfuncid((OpExpr *) node);
+ add_function_cost(context->root, ((OpExpr *) node)->opfuncid, node,
+ &context->total);
+ }
+ else if (IsA(node, ScalarArrayOpExpr))
+ {
+ ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) node;
+ Node *arraynode = (Node *) lsecond(saop->args);
+ QualCost sacosts;
+ QualCost hcosts;
+ int estarraylen = estimate_array_length(arraynode);
+
+ set_sa_opfuncid(saop);
+ sacosts.startup = sacosts.per_tuple = 0;
+ add_function_cost(context->root, saop->opfuncid, NULL,
+ &sacosts);
+
+ if (OidIsValid(saop->hashfuncid))
+ {
+ /* Handle costs for hashed ScalarArrayOpExpr */
+ hcosts.startup = hcosts.per_tuple = 0;
+
+ add_function_cost(context->root, saop->hashfuncid, NULL, &hcosts);
+ context->total.startup += sacosts.startup + hcosts.startup;
+
+ /* Estimate the cost of building the hashtable. */
+ context->total.startup += estarraylen * hcosts.per_tuple;
+
+ /*
+ * XXX should we charge a little bit for sacosts.per_tuple when
+			 * building the table, or is it OK to assume there will be zero
+			 * hash collisions?
+ */
+
+ /*
+ * Charge for hashtable lookups. Charge a single hash and a
+ * single comparison.
+ */
+ context->total.per_tuple += hcosts.per_tuple + sacosts.per_tuple;
+ }
+ else
+ {
+ /*
+ * Estimate that the operator will be applied to about half of the
+ * array elements before the answer is determined.
+ */
+ context->total.startup += sacosts.startup;
+ context->total.per_tuple += sacosts.per_tuple *
+ estimate_array_length(arraynode) * 0.5;
+ }
+ }
+ else if (IsA(node, Aggref) ||
+ IsA(node, WindowFunc))
+ {
+ /*
+ * Aggref and WindowFunc nodes are (and should be) treated like Vars,
+ * ie, zero execution cost in the current model, because they behave
+ * essentially like Vars at execution. We disregard the costs of
+ * their input expressions for the same reason. The actual execution
+ * costs of the aggregate/window functions and their arguments have to
+ * be factored into plan-node-specific costing of the Agg or WindowAgg
+ * plan node.
+ */
+ return false; /* don't recurse into children */
+ }
+ else if (IsA(node, GroupingFunc))
+ {
+ /* Treat this as having cost 1 */
+ context->total.per_tuple += cpu_operator_cost;
+ return false; /* don't recurse into children */
+ }
+ else if (IsA(node, CoerceViaIO))
+ {
+ CoerceViaIO *iocoerce = (CoerceViaIO *) node;
+ Oid iofunc;
+ Oid typioparam;
+ bool typisvarlena;
+
+ /* check the result type's input function */
+ getTypeInputInfo(iocoerce->resulttype,
+ &iofunc, &typioparam);
+ add_function_cost(context->root, iofunc, NULL,
+ &context->total);
+ /* check the input type's output function */
+ getTypeOutputInfo(exprType((Node *) iocoerce->arg),
+ &iofunc, &typisvarlena);
+ add_function_cost(context->root, iofunc, NULL,
+ &context->total);
+ }
+ else if (IsA(node, ArrayCoerceExpr))
+ {
+ ArrayCoerceExpr *acoerce = (ArrayCoerceExpr *) node;
+ QualCost perelemcost;
+
+ cost_qual_eval_node(&perelemcost, (Node *) acoerce->elemexpr,
+ context->root);
+ context->total.startup += perelemcost.startup;
+ if (perelemcost.per_tuple > 0)
+ context->total.per_tuple += perelemcost.per_tuple *
+ estimate_array_length((Node *) acoerce->arg);
+ }
+ else if (IsA(node, RowCompareExpr))
+ {
+ /* Conservatively assume we will check all the columns */
+ RowCompareExpr *rcexpr = (RowCompareExpr *) node;
+ ListCell *lc;
+
+ foreach(lc, rcexpr->opnos)
+ {
+ Oid opid = lfirst_oid(lc);
+
+ add_function_cost(context->root, get_opcode(opid), NULL,
+ &context->total);
+ }
+ }
+ else if (IsA(node, MinMaxExpr) ||
+ IsA(node, SQLValueFunction) ||
+ IsA(node, XmlExpr) ||
+ IsA(node, CoerceToDomain) ||
+ IsA(node, NextValueExpr))
+ {
+ /* Treat all these as having cost 1 */
+ context->total.per_tuple += cpu_operator_cost;
+ }
+ else if (IsA(node, CurrentOfExpr))
+ {
+ /* Report high cost to prevent selection of anything but TID scan */
+ context->total.startup += disable_cost;
+ }
+ else if (IsA(node, SubLink))
+ {
+ /* This routine should not be applied to un-planned expressions */
+ elog(ERROR, "cannot handle unplanned sub-select");
+ }
+ else if (IsA(node, SubPlan))
+ {
+ /*
+ * A subplan node in an expression typically indicates that the
+ * subplan will be executed on each evaluation, so charge accordingly.
+ * (Sub-selects that can be executed as InitPlans have already been
+ * removed from the expression.)
+ */
+ SubPlan *subplan = (SubPlan *) node;
+
+ context->total.startup += subplan->startup_cost;
+ context->total.per_tuple += subplan->per_call_cost;
+
+ /*
+ * We don't want to recurse into the testexpr, because it was already
+ * counted in the SubPlan node's costs. So we're done.
+ */
+ return false;
+ }
+ else if (IsA(node, AlternativeSubPlan))
+ {
+ /*
+ * Arbitrarily use the first alternative plan for costing. (We should
+ * certainly only include one alternative, and we don't yet have
+ * enough information to know which one the executor is most likely to
+ * use.)
+ */
+ AlternativeSubPlan *asplan = (AlternativeSubPlan *) node;
+
+ return cost_qual_eval_walker((Node *) linitial(asplan->subplans),
+ context);
+ }
+ else if (IsA(node, PlaceHolderVar))
+ {
+ /*
+ * A PlaceHolderVar should be given cost zero when considering general
+ * expression evaluation costs. The expense of doing the contained
+ * expression is charged as part of the tlist eval costs of the scan
+ * or join where the PHV is first computed (see set_rel_width and
+ * add_placeholders_to_joinrel). If we charged it again here, we'd be
+ * double-counting the cost for each level of plan that the PHV
+ * bubbles up through. Hence, return without recursing into the
+ * phexpr.
+ */
+ return false;
+ }
+
+ /* recurse into children */
+ return expression_tree_walker(node, cost_qual_eval_walker,
+ (void *) context);
+}
+
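+/*
+ * Worked example, assuming the default cpu_operator_cost of 0.0025 and
+ * built-in operators with procost = 1:
+ *
+ *     WHERE a = 1 AND b < 10     ->  per_tuple = 2 * 0.0025 = 0.005
+ *     WHERE x = ANY(arr)         ->  per_tuple = 0.0025 * n * 0.5 for an
+ *                                    n-element, non-hashed array
+ *
+ * A pseudoconstant clause contributes its per-tuple charge to the startup
+ * component instead, since it is evaluated only once.
+ */
+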
+/*
+ * get_restriction_qual_cost
+ * Compute evaluation costs of a baserel's restriction quals, plus any
+ * movable join quals that have been pushed down to the scan.
+ * Results are returned into *qpqual_cost.
+ *
+ * This is a convenience subroutine that works for seqscans and other cases
+ * where all the given quals will be evaluated the hard way. It's not useful
+ * for cost_index(), for example, where the index machinery takes care of
+ * some of the quals. We assume baserestrictcost was previously set by
+ * set_baserel_size_estimates().
+ */
+static void
+get_restriction_qual_cost(PlannerInfo *root, RelOptInfo *baserel,
+ ParamPathInfo *param_info,
+ QualCost *qpqual_cost)
+{
+ if (param_info)
+ {
+ /* Include costs of pushed-down clauses */
+ cost_qual_eval(qpqual_cost, param_info->ppi_clauses, root);
+
+ qpqual_cost->startup += baserel->baserestrictcost.startup;
+ qpqual_cost->per_tuple += baserel->baserestrictcost.per_tuple;
+ }
+ else
+ *qpqual_cost = baserel->baserestrictcost;
+}
+
+
+/*
+ * compute_semi_anti_join_factors
+ * Estimate how much of the inner input a SEMI, ANTI, or inner_unique join
+ * can be expected to scan.
+ *
+ * In a hash or nestloop SEMI/ANTI join, the executor will stop scanning
+ * inner rows as soon as it finds a match to the current outer row.
+ * The same happens if we have detected the inner rel is unique.
+ * We should therefore adjust some of the cost components for this effect.
+ * This function computes some estimates needed for these adjustments.
+ * These estimates will be the same regardless of the particular paths used
+ * for the outer and inner relation, so we compute these once and then pass
+ * them to all the join cost estimation functions.
+ *
+ * Input parameters:
+ * joinrel: join relation under consideration
+ * outerrel: outer relation under consideration
+ * innerrel: inner relation under consideration
+ * jointype: if not JOIN_SEMI or JOIN_ANTI, we assume it's inner_unique
+ * sjinfo: SpecialJoinInfo relevant to this join
+ * restrictlist: join quals
+ * Output parameters:
+ * *semifactors is filled in (see pathnodes.h for field definitions)
+ */
+void
+compute_semi_anti_join_factors(PlannerInfo *root,
+ RelOptInfo *joinrel,
+ RelOptInfo *outerrel,
+ RelOptInfo *innerrel,
+ JoinType jointype,
+ SpecialJoinInfo *sjinfo,
+ List *restrictlist,
+ SemiAntiJoinFactors *semifactors)
+{
+ Selectivity jselec;
+ Selectivity nselec;
+ Selectivity avgmatch;
+ SpecialJoinInfo norm_sjinfo;
+ List *joinquals;
+ ListCell *l;
+
+ /*
+ * In an ANTI join, we must ignore clauses that are "pushed down", since
+ * those won't affect the match logic. In a SEMI join, we do not
+ * distinguish joinquals from "pushed down" quals, so just use the whole
+ * restrictinfo list. For other outer join types, we should consider only
+ * non-pushed-down quals, so that this devolves to an IS_OUTER_JOIN check.
+ */
+ if (IS_OUTER_JOIN(jointype))
+ {
+ joinquals = NIL;
+ foreach(l, restrictlist)
+ {
+ RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
+
+ if (!RINFO_IS_PUSHED_DOWN(rinfo, joinrel->relids))
+ joinquals = lappend(joinquals, rinfo);
+ }
+ }
+ else
+ joinquals = restrictlist;
+
+ /*
+ * Get the JOIN_SEMI or JOIN_ANTI selectivity of the join clauses.
+ */
+ jselec = clauselist_selectivity(root,
+ joinquals,
+ 0,
+ (jointype == JOIN_ANTI) ? JOIN_ANTI : JOIN_SEMI,
+ sjinfo);
+
+ /*
+ * Also get the normal inner-join selectivity of the join clauses.
+ */
+ norm_sjinfo.type = T_SpecialJoinInfo;
+ norm_sjinfo.min_lefthand = outerrel->relids;
+ norm_sjinfo.min_righthand = innerrel->relids;
+ norm_sjinfo.syn_lefthand = outerrel->relids;
+ norm_sjinfo.syn_righthand = innerrel->relids;
+ norm_sjinfo.jointype = JOIN_INNER;
+ /* we don't bother trying to make the remaining fields valid */
+ norm_sjinfo.lhs_strict = false;
+ norm_sjinfo.delay_upper_joins = false;
+ norm_sjinfo.semi_can_btree = false;
+ norm_sjinfo.semi_can_hash = false;
+ norm_sjinfo.semi_operators = NIL;
+ norm_sjinfo.semi_rhs_exprs = NIL;
+
+ nselec = clauselist_selectivity(root,
+ joinquals,
+ 0,
+ JOIN_INNER,
+ &norm_sjinfo);
+
+ /* Avoid leaking a lot of ListCells */
+ if (IS_OUTER_JOIN(jointype))
+ list_free(joinquals);
+
+ /*
+ * jselec can be interpreted as the fraction of outer-rel rows that have
+ * any matches (this is true for both SEMI and ANTI cases). And nselec is
+ * the fraction of the Cartesian product that matches. So, the average
+ * number of matches for each outer-rel row that has at least one match is
+ * nselec * inner_rows / jselec.
+ *
+ * Note: it is correct to use the inner rel's "rows" count here, even
+ * though we might later be considering a parameterized inner path with
+ * fewer rows. This is because we have included all the join clauses in
+ * the selectivity estimate.
+ */
+ if (jselec > 0) /* protect against zero divide */
+ {
+ avgmatch = nselec * innerrel->rows / jselec;
+ /* Clamp to sane range */
+ avgmatch = Max(1.0, avgmatch);
+ }
+ else
+ avgmatch = 1.0;
+
+ semifactors->outer_match_frac = jselec;
+ semifactors->match_count = avgmatch;
+}
+
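+/*
+ * Worked example: if jselec = 0.1 (10% of outer rows have at least one
+ * match), nselec = 0.005 and the inner rel has 200 rows, then
+ *
+ *     avgmatch = 0.005 * 200 / 0.1 = 10
+ *
+ * so outer_match_frac = 0.1 and match_count = 10, i.e. each matched outer
+ * row is expected to have about 10 joinable inner rows.
+ */
+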
+/*
+ * has_indexed_join_quals
+ * Check whether all the joinquals of a nestloop join are used as
+ * inner index quals.
+ *
+ * If the inner path of a SEMI/ANTI join is an indexscan (including bitmap
+ * indexscan) that uses all the joinquals as indexquals, we can assume that an
+ * unmatched outer tuple is cheap to process, whereas otherwise it's probably
+ * expensive.
+ */
+static bool
+has_indexed_join_quals(NestPath *path)
+{
+ JoinPath *joinpath = &path->jpath;
+ Relids joinrelids = joinpath->path.parent->relids;
+ Path *innerpath = joinpath->innerjoinpath;
+ List *indexclauses;
+ bool found_one;
+ ListCell *lc;
+
+ /* If join still has quals to evaluate, it's not fast */
+ if (joinpath->joinrestrictinfo != NIL)
+ return false;
+ /* Nor if the inner path isn't parameterized at all */
+ if (innerpath->param_info == NULL)
+ return false;
+
+ /* Find the indexclauses list for the inner scan */
+ switch (innerpath->pathtype)
+ {
+ case T_IndexScan:
+ case T_IndexOnlyScan:
+ indexclauses = ((IndexPath *) innerpath)->indexclauses;
+ break;
+ case T_BitmapHeapScan:
+ {
+ /* Accept only a simple bitmap scan, not AND/OR cases */
+ Path *bmqual = ((BitmapHeapPath *) innerpath)->bitmapqual;
+
+ if (IsA(bmqual, IndexPath))
+ indexclauses = ((IndexPath *) bmqual)->indexclauses;
+ else
+ return false;
+ break;
+ }
+ default:
+
+ /*
+ * If it's not a simple indexscan, it probably doesn't run quickly
+ * for zero rows out, even if it's a parameterized path using all
+ * the joinquals.
+ */
+ return false;
+ }
+
+ /*
+ * Examine the inner path's param clauses. Any that are from the outer
+ * path must be found in the indexclauses list, either exactly or in an
+ * equivalent form generated by equivclass.c. Also, we must find at least
+ * one such clause, else it's a clauseless join which isn't fast.
+ */
+ found_one = false;
+ foreach(lc, innerpath->param_info->ppi_clauses)
+ {
+ RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
+
+ if (join_clause_is_movable_into(rinfo,
+ innerpath->parent->relids,
+ joinrelids))
+ {
+ if (!is_redundant_with_indexclauses(rinfo, indexclauses))
+ return false;
+ found_one = true;
+ }
+ }
+ return found_one;
+}
+
+
+/*
+ * approx_tuple_count
+ * Quick-and-dirty estimation of the number of join rows passing
+ * a set of qual conditions.
+ *
+ * The quals can be either an implicitly-ANDed list of boolean expressions,
+ * or a list of RestrictInfo nodes (typically the latter).
+ *
+ * We intentionally compute the selectivity under JOIN_INNER rules, even
+ * if it's some type of outer join. This is appropriate because we are
+ * trying to figure out how many tuples pass the initial merge or hash
+ * join step.
+ *
+ * This is quick-and-dirty because we bypass clauselist_selectivity, and
+ * simply multiply the independent clause selectivities together. Now
+ * clauselist_selectivity often can't do any better than that anyhow, but
+ * for some situations (such as range constraints) it is smarter. However,
+ * we can't effectively cache the results of clauselist_selectivity, whereas
+ * the individual clause selectivities can be and are cached.
+ *
+ * Since we are only using the results to estimate how many potential
+ * output tuples are generated and passed through qpqual checking, it
+ * seems OK to live with the approximation.
+ */
+static double
+approx_tuple_count(PlannerInfo *root, JoinPath *path, List *quals)
+{
+ double tuples;
+ double outer_tuples = path->outerjoinpath->rows;
+ double inner_tuples = path->innerjoinpath->rows;
+ SpecialJoinInfo sjinfo;
+ Selectivity selec = 1.0;
+ ListCell *l;
+
+ /*
+ * Make up a SpecialJoinInfo for JOIN_INNER semantics.
+ */
+ sjinfo.type = T_SpecialJoinInfo;
+ sjinfo.min_lefthand = path->outerjoinpath->parent->relids;
+ sjinfo.min_righthand = path->innerjoinpath->parent->relids;
+ sjinfo.syn_lefthand = path->outerjoinpath->parent->relids;
+ sjinfo.syn_righthand = path->innerjoinpath->parent->relids;
+ sjinfo.jointype = JOIN_INNER;
+ /* we don't bother trying to make the remaining fields valid */
+ sjinfo.lhs_strict = false;
+ sjinfo.delay_upper_joins = false;
+ sjinfo.semi_can_btree = false;
+ sjinfo.semi_can_hash = false;
+ sjinfo.semi_operators = NIL;
+ sjinfo.semi_rhs_exprs = NIL;
+
+ /* Get the approximate selectivity */
+ foreach(l, quals)
+ {
+ Node *qual = (Node *) lfirst(l);
+
+ /* Note that clause_selectivity will be able to cache its result */
+ selec *= clause_selectivity(root, qual, 0, JOIN_INNER, &sjinfo);
+ }
+
+ /* Apply it to the input relation sizes */
+ tuples = selec * outer_tuples * inner_tuples;
+
+ return clamp_row_est(tuples);
+}
+
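+/*
+ * Worked example: with two quals of selectivity 0.1 and 0.02, an outer
+ * input of 10000 rows and an inner input of 5000 rows,
+ *
+ *     tuples = (0.1 * 0.02) * 10000 * 5000 = 100000
+ *
+ * Treating the clauses as independent may be cruder than
+ * clauselist_selectivity, but it lets the cached per-clause selectivities
+ * be reused.
+ */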
+
+/*
+ * set_baserel_size_estimates
+ * Set the size estimates for the given base relation.
+ *
+ * The rel's targetlist and restrictinfo list must have been constructed
+ * already, and rel->tuples must be set.
+ *
+ * We set the following fields of the rel node:
+ * rows: the estimated number of output tuples (after applying
+ * restriction clauses).
+ * width: the estimated average output tuple width in bytes.
+ * baserestrictcost: estimated cost of evaluating baserestrictinfo clauses.
+ */
+void
+set_baserel_size_estimates(PlannerInfo *root, RelOptInfo *rel)
+{
+ double nrows;
+
+ /* Should only be applied to base relations */
+ Assert(rel->relid > 0);
+
+ nrows = rel->tuples *
+ clauselist_selectivity(root,
+ rel->baserestrictinfo,
+ 0,
+ JOIN_INNER,
+ NULL);
+
+ rel->rows = clamp_row_est(nrows);
+
+ cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo, root);
+
+ set_rel_width(root, rel);
+}
+
+/*
+ * get_parameterized_baserel_size
+ * Make a size estimate for a parameterized scan of a base relation.
+ *
+ * 'param_clauses' lists the additional join clauses to be used.
+ *
+ * set_baserel_size_estimates must have been applied already.
+ */
+double
+get_parameterized_baserel_size(PlannerInfo *root, RelOptInfo *rel,
+ List *param_clauses)
+{
+ List *allclauses;
+ double nrows;
+
+ /*
+ * Estimate the number of rows returned by the parameterized scan, knowing
+ * that it will apply all the extra join clauses as well as the rel's own
+ * restriction clauses. Note that we force the clauses to be treated as
+ * non-join clauses during selectivity estimation.
+ */
+ allclauses = list_concat_copy(param_clauses, rel->baserestrictinfo);
+ nrows = rel->tuples *
+ clauselist_selectivity(root,
+ allclauses,
+ rel->relid, /* do not use 0! */
+ JOIN_INNER,
+ NULL);
+ nrows = clamp_row_est(nrows);
+ /* For safety, make sure result is not more than the base estimate */
+ if (nrows > rel->rows)
+ nrows = rel->rows;
+ return nrows;
+}
+
+/*
+ * set_joinrel_size_estimates
+ * Set the size estimates for the given join relation.
+ *
+ * The rel's targetlist must have been constructed already, and a
+ * restriction clause list that matches the given component rels must
+ * be provided.
+ *
+ * Since there is more than one way to make a joinrel for more than two
+ * base relations, the results we get here could depend on which component
+ * rel pair is provided. In theory we should get the same answers no matter
+ * which pair is provided; in practice, since the selectivity estimation
+ * routines don't handle all cases equally well, we might not. But there's
+ * not much to be done about it. (Would it make sense to repeat the
+ * calculations for each pair of input rels that's encountered, and somehow
+ * average the results? Probably way more trouble than it's worth, and
+ * anyway we must keep the rowcount estimate the same for all paths for the
+ * joinrel.)
+ *
+ * We set only the rows field here. The reltarget field was already set by
+ * build_joinrel_tlist, and baserestrictcost is not used for join rels.
+ */
+void
+set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel,
+ RelOptInfo *outer_rel,
+ RelOptInfo *inner_rel,
+ SpecialJoinInfo *sjinfo,
+ List *restrictlist)
+{
+ rel->rows = calc_joinrel_size_estimate(root,
+ rel,
+ outer_rel,
+ inner_rel,
+ outer_rel->rows,
+ inner_rel->rows,
+ sjinfo,
+ restrictlist);
+}
+
+/*
+ * get_parameterized_joinrel_size
+ * Make a size estimate for a parameterized scan of a join relation.
+ *
+ * 'rel' is the joinrel under consideration.
+ * 'outer_path', 'inner_path' are (probably also parameterized) Paths that
+ * produce the relations being joined.
+ * 'sjinfo' is any SpecialJoinInfo relevant to this join.
+ * 'restrict_clauses' lists the join clauses that need to be applied at the
+ * join node (including any movable clauses that were moved down to this join,
+ * and not including any movable clauses that were pushed down into the
+ * child paths).
+ *
+ * set_joinrel_size_estimates must have been applied already.
+ */
+double
+get_parameterized_joinrel_size(PlannerInfo *root, RelOptInfo *rel,
+ Path *outer_path,
+ Path *inner_path,
+ SpecialJoinInfo *sjinfo,
+ List *restrict_clauses)
+{
+ double nrows;
+
+ /*
+ * Estimate the number of rows returned by the parameterized join as the
+ * sizes of the input paths times the selectivity of the clauses that have
+ * ended up at this join node.
+ *
+ * As with set_joinrel_size_estimates, the rowcount estimate could depend
+ * on the pair of input paths provided, though ideally we'd get the same
+ * estimate for any pair with the same parameterization.
+ */
+ nrows = calc_joinrel_size_estimate(root,
+ rel,
+ outer_path->parent,
+ inner_path->parent,
+ outer_path->rows,
+ inner_path->rows,
+ sjinfo,
+ restrict_clauses);
+ /* For safety, make sure result is not more than the base estimate */
+ if (nrows > rel->rows)
+ nrows = rel->rows;
+ return nrows;
+}
+
+/*
+ * calc_joinrel_size_estimate
+ * Workhorse for set_joinrel_size_estimates and
+ * get_parameterized_joinrel_size.
+ *
+ * outer_rel/inner_rel are the relations being joined, but they should be
+ * assumed to have sizes outer_rows/inner_rows; those numbers might be less
+ * than what rel->rows says, when we are considering parameterized paths.
+ */
+static double
+calc_joinrel_size_estimate(PlannerInfo *root,
+ RelOptInfo *joinrel,
+ RelOptInfo *outer_rel,
+ RelOptInfo *inner_rel,
+ double outer_rows,
+ double inner_rows,
+ SpecialJoinInfo *sjinfo,
+ List *restrictlist_in)
+{
+ /* This apparently-useless variable dodges a compiler bug in VS2013: */
+ List *restrictlist = restrictlist_in;
+ JoinType jointype = sjinfo->jointype;
+ Selectivity fkselec;
+ Selectivity jselec;
+ Selectivity pselec;
+ double nrows;
+
+ /*
+ * Compute joinclause selectivity. Note that we are only considering
+ * clauses that become restriction clauses at this join level; we are not
+ * double-counting them because they were not considered in estimating the
+ * sizes of the component rels.
+ *
+ * First, see whether any of the joinclauses can be matched to known FK
+ * constraints. If so, drop those clauses from the restrictlist, and
+ * instead estimate their selectivity using FK semantics. (We do this
+ * without regard to whether said clauses are local or "pushed down".
+ * Probably, an FK-matching clause could never be seen as pushed down at
+ * an outer join, since it would be strict and hence would be grounds for
+ * join strength reduction.) fkselec gets the net selectivity for
+ * FK-matching clauses, or 1.0 if there are none.
+ */
+ fkselec = get_foreign_key_join_selectivity(root,
+ outer_rel->relids,
+ inner_rel->relids,
+ sjinfo,
+ &restrictlist);
+
+ /*
+ * For an outer join, we have to distinguish the selectivity of the join's
+ * own clauses (JOIN/ON conditions) from any clauses that were "pushed
+ * down". For inner joins we just count them all as joinclauses.
+ */
+ if (IS_OUTER_JOIN(jointype))
+ {
+ List *joinquals = NIL;
+ List *pushedquals = NIL;
+ ListCell *l;
+
+ /* Grovel through the clauses to separate into two lists */
+ foreach(l, restrictlist)
+ {
+ RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
+
+ if (RINFO_IS_PUSHED_DOWN(rinfo, joinrel->relids))
+ pushedquals = lappend(pushedquals, rinfo);
+ else
+ joinquals = lappend(joinquals, rinfo);
+ }
+
+ /* Get the separate selectivities */
+ jselec = clauselist_selectivity(root,
+ joinquals,
+ 0,
+ jointype,
+ sjinfo);
+ pselec = clauselist_selectivity(root,
+ pushedquals,
+ 0,
+ jointype,
+ sjinfo);
+
+ /* Avoid leaking a lot of ListCells */
+ list_free(joinquals);
+ list_free(pushedquals);
+ }
+ else
+ {
+ jselec = clauselist_selectivity(root,
+ restrictlist,
+ 0,
+ jointype,
+ sjinfo);
+ pselec = 0.0; /* not used, keep compiler quiet */
+ }
+
+ /*
+ * Basically, we multiply size of Cartesian product by selectivity.
+ *
+ * If we are doing an outer join, take that into account: the joinqual
+ * selectivity has to be clamped using the knowledge that the output must
+ * be at least as large as the non-nullable input. However, any
+ * pushed-down quals are applied after the outer join, so their
+ * selectivity applies fully.
+ *
+ * For JOIN_SEMI and JOIN_ANTI, the selectivity is defined as the fraction
+ * of LHS rows that have matches, and we apply that straightforwardly.
+ */
+ switch (jointype)
+ {
+ case JOIN_INNER:
+ nrows = outer_rows * inner_rows * fkselec * jselec;
+ /* pselec not used */
+ break;
+ case JOIN_LEFT:
+ nrows = outer_rows * inner_rows * fkselec * jselec;
+ if (nrows < outer_rows)
+ nrows = outer_rows;
+ nrows *= pselec;
+ break;
+ case JOIN_FULL:
+ nrows = outer_rows * inner_rows * fkselec * jselec;
+ if (nrows < outer_rows)
+ nrows = outer_rows;
+ if (nrows < inner_rows)
+ nrows = inner_rows;
+ nrows *= pselec;
+ break;
+ case JOIN_SEMI:
+ nrows = outer_rows * fkselec * jselec;
+ /* pselec not used */
+ break;
+ case JOIN_ANTI:
+ nrows = outer_rows * (1.0 - fkselec * jselec);
+ nrows *= pselec;
+ break;
+ default:
+ /* other values not expected here */
+ elog(ERROR, "unrecognized join type: %d", (int) jointype);
+ nrows = 0; /* keep compiler quiet */
+ break;
+ }
+
+ return clamp_row_est(nrows);
+}
+
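+/*
+ * Worked example for the JOIN_LEFT branch: with outer_rows = 1000,
+ * inner_rows = 50, fkselec = 1.0, jselec = 0.0001 and a pushed-down qual
+ * of selectivity pselec = 0.5,
+ *
+ *     nrows = 1000 * 50 * 1.0 * 0.0001 = 5    -> clamped up to 1000
+ *     nrows = 1000 * 0.5               = 500
+ *
+ * The clamp reflects that a left join emits at least one row per outer
+ * row; the pushed-down qual is applied after the join.
+ */
+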
+/*
+ * get_foreign_key_join_selectivity
+ * Estimate join selectivity for foreign-key-related clauses.
+ *
+ * Remove any clauses that can be matched to FK constraints from *restrictlist,
+ * and return a substitute estimate of their selectivity. 1.0 is returned
+ * when there are no such clauses.
+ *
+ * The reason for treating such clauses specially is that we can get better
+ * estimates this way than by relying on clauselist_selectivity(), especially
+ * for multi-column FKs where that function's assumption that the clauses are
+ * independent falls down badly. But even with single-column FKs, we may be
+ * able to get a better answer when the pg_statistic stats are missing or out
+ * of date.
+ */
+static Selectivity
+get_foreign_key_join_selectivity(PlannerInfo *root,
+ Relids outer_relids,
+ Relids inner_relids,
+ SpecialJoinInfo *sjinfo,
+ List **restrictlist)
+{
+ Selectivity fkselec = 1.0;
+ JoinType jointype = sjinfo->jointype;
+ List *worklist = *restrictlist;
+ ListCell *lc;
+
+ /* Consider each FK constraint that is known to match the query */
+ foreach(lc, root->fkey_list)
+ {
+ ForeignKeyOptInfo *fkinfo = (ForeignKeyOptInfo *) lfirst(lc);
+ bool ref_is_outer;
+ List *removedlist;
+ ListCell *cell;
+
+ /*
+ * This FK is not relevant unless it connects a baserel on one side of
+ * this join to a baserel on the other side.
+ */
+ if (bms_is_member(fkinfo->con_relid, outer_relids) &&
+ bms_is_member(fkinfo->ref_relid, inner_relids))
+ ref_is_outer = false;
+ else if (bms_is_member(fkinfo->ref_relid, outer_relids) &&
+ bms_is_member(fkinfo->con_relid, inner_relids))
+ ref_is_outer = true;
+ else
+ continue;
+
+ /*
+ * If we're dealing with a semi/anti join, and the FK's referenced
+ * relation is on the outside, then knowledge of the FK doesn't help
+ * us figure out what we need to know (which is the fraction of outer
+ * rows that have matches). On the other hand, if the referenced rel
+ * is on the inside, then all outer rows must have matches in the
+ * referenced table (ignoring nulls). But any restriction or join
+ * clauses that filter that table will reduce the fraction of matches.
+ * We can account for restriction clauses, but it's too hard to guess
+ * how many table rows would get through a join that's inside the RHS.
+ * Hence, if either case applies, punt and ignore the FK.
+ */
+ if ((jointype == JOIN_SEMI || jointype == JOIN_ANTI) &&
+ (ref_is_outer || bms_membership(inner_relids) != BMS_SINGLETON))
+ continue;
+
+ /*
+ * Modify the restrictlist by removing clauses that match the FK (and
+ * putting them into removedlist instead). It seems unsafe to modify
+ * the originally-passed List structure, so we make a shallow copy the
+ * first time through.
+ */
+ if (worklist == *restrictlist)
+ worklist = list_copy(worklist);
+
+ removedlist = NIL;
+ foreach(cell, worklist)
+ {
+ RestrictInfo *rinfo = (RestrictInfo *) lfirst(cell);
+ bool remove_it = false;
+ int i;
+
+ /* Drop this clause if it matches any column of the FK */
+ for (i = 0; i < fkinfo->nkeys; i++)
+ {
+ if (rinfo->parent_ec)
+ {
+ /*
+ * EC-derived clauses can only match by EC. It is okay to
+ * consider any clause derived from the same EC as
+ * matching the FK: even if equivclass.c chose to generate
+ * a clause equating some other pair of Vars, it could
+ * have generated one equating the FK's Vars. So for
+ * purposes of estimation, we can act as though it did so.
+ *
+ * Note: checking parent_ec is a bit of a cheat because
+ * there are EC-derived clauses that don't have parent_ec
+ * set; but such clauses must compare expressions that
+ * aren't just Vars, so they cannot match the FK anyway.
+ */
+ if (fkinfo->eclass[i] == rinfo->parent_ec)
+ {
+ remove_it = true;
+ break;
+ }
+ }
+ else
+ {
+ /*
+ * Otherwise, see if rinfo was previously matched to FK as
+ * a "loose" clause.
+ */
+ if (list_member_ptr(fkinfo->rinfos[i], rinfo))
+ {
+ remove_it = true;
+ break;
+ }
+ }
+ }
+ if (remove_it)
+ {
+ worklist = foreach_delete_current(worklist, cell);
+ removedlist = lappend(removedlist, rinfo);
+ }
+ }
+
+ /*
+ * If we failed to remove all the matching clauses we expected to
+ * find, chicken out and ignore this FK; applying its selectivity
+ * might result in double-counting. Put any clauses we did manage to
+ * remove back into the worklist.
+ *
+ * Since the matching clauses are known not outerjoin-delayed, they
+ * would normally have appeared in the initial joinclause list. If we
+ * didn't find them, there are two possibilities:
+ *
+ * 1. If the FK match is based on an EC that is ec_has_const, it won't
+ * have generated any join clauses at all. We discount such ECs while
+ * checking to see if we have "all" the clauses. (Below, we'll adjust
+ * the selectivity estimate for this case.)
+ *
+ * 2. The clauses were matched to some other FK in a previous
+ * iteration of this loop, and thus removed from worklist. (A likely
+ * case is that two FKs are matched to the same EC; there will be only
+ * one EC-derived clause in the initial list, so the first FK will
+ * consume it.) Applying both FKs' selectivity independently risks
+ * underestimating the join size; in particular, this would undo one
+ * of the main things that ECs were invented for, namely to avoid
+ * double-counting the selectivity of redundant equality conditions.
+ * Later we might think of a reasonable way to combine the estimates,
+ * but for now, just punt, since this is a fairly uncommon situation.
+ */
+ if (removedlist == NIL ||
+ list_length(removedlist) !=
+ (fkinfo->nmatched_ec - fkinfo->nconst_ec + fkinfo->nmatched_ri))
+ {
+ worklist = list_concat(worklist, removedlist);
+ continue;
+ }
+
+ /*
+ * Finally we get to the payoff: estimate selectivity using the
+ * knowledge that each referencing row will match exactly one row in
+ * the referenced table.
+ *
+ * XXX that's not true in the presence of nulls in the referencing
+ * column(s), so in principle we should derate the estimate for those.
+ * However (1) if there are any strict restriction clauses for the
+ * referencing column(s) elsewhere in the query, derating here would
+ * be double-counting the null fraction, and (2) it's not very clear
+ * how to combine null fractions for multiple referencing columns. So
+ * we do nothing for now about correcting for nulls.
+ *
+ * XXX another point here is that if either side of an FK constraint
+ * is an inheritance parent, we estimate as though the constraint
+ * covers all its children as well. This is not an unreasonable
+ * assumption for a referencing table, ie the user probably applied
+ * identical constraints to all child tables (though perhaps we ought
+ * to check that). But it's not possible to have done that for a
+ * referenced table. Fortunately, precisely because that doesn't
+ * work, it is uncommon in practice to have an FK referencing a parent
+ * table. So, at least for now, disregard inheritance here.
+ */
+ if (jointype == JOIN_SEMI || jointype == JOIN_ANTI)
+ {
+ /*
+ * For JOIN_SEMI and JOIN_ANTI, we only get here when the FK's
+ * referenced table is exactly the inside of the join. The join
+ * selectivity is defined as the fraction of LHS rows that have
+ * matches. The FK implies that every LHS row has a match *in the
+ * referenced table*; but any restriction clauses on it will
+ * reduce the number of matches. Hence we take the join
+ * selectivity as equal to the selectivity of the table's
+ * restriction clauses, which is rows / tuples; but we must guard
+ * against tuples == 0.
+ */
+ RelOptInfo *ref_rel = find_base_rel(root, fkinfo->ref_relid);
+ double ref_tuples = Max(ref_rel->tuples, 1.0);
+
+ fkselec *= ref_rel->rows / ref_tuples;
+ }
+ else
+ {
+ /*
+ * Otherwise, selectivity is exactly 1/referenced-table-size; but
+ * guard against tuples == 0. Note we should use the raw table
+ * tuple count, not any estimate of its filtered or joined size.
+ */
+ RelOptInfo *ref_rel = find_base_rel(root, fkinfo->ref_relid);
+ double ref_tuples = Max(ref_rel->tuples, 1.0);
+
+ fkselec *= 1.0 / ref_tuples;
+ }
+
+ /*
+ * If any of the FK columns participated in ec_has_const ECs, then
+ * equivclass.c will have generated "var = const" restrictions for
+ * each side of the join, thus reducing the sizes of both input
+ * relations. Taking the fkselec at face value would amount to
+ * double-counting the selectivity of the constant restriction for the
+ * referencing Var. Hence, look for the restriction clause(s) that
+ * were applied to the referencing Var(s), and divide out their
+ * selectivity to correct for this.
+ */
+ if (fkinfo->nconst_ec > 0)
+ {
+ for (int i = 0; i < fkinfo->nkeys; i++)
+ {
+ EquivalenceClass *ec = fkinfo->eclass[i];
+
+ if (ec && ec->ec_has_const)
+ {
+ EquivalenceMember *em = fkinfo->fk_eclass_member[i];
+ RestrictInfo *rinfo = find_derived_clause_for_ec_member(ec,
+ em);
+
+ if (rinfo)
+ {
+ Selectivity s0;
+
+ s0 = clause_selectivity(root,
+ (Node *) rinfo,
+ 0,
+ jointype,
+ sjinfo);
+ if (s0 > 0)
+ fkselec /= s0;
+ }
+ }
+ }
+ }
+ }
+
+ *restrictlist = worklist;
+ CLAMP_PROBABILITY(fkselec);
+ return fkselec;
+}
+
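+/*
+ * Worked example (hypothetical tables "orders" and "customers"): for a
+ * single-column FK orders.customer_id -> customers.id, the equality clause
+ * is dropped from the restrict list and, for a plain inner join,
+ *
+ *     fkselec = 1.0 / customers.tuples
+ *
+ * With no other quals the join is then estimated at
+ * orders_rows * customers_tuples * fkselec = orders_rows, i.e. exactly one
+ * match per referencing row.  For a SEMI or ANTI join with customers on
+ * the inside and restrictions leaving customers.rows = 250000 out of
+ * 1000000 tuples, fkselec = 0.25 instead.
+ */
+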
+/*
+ * set_subquery_size_estimates
+ * Set the size estimates for a base relation that is a subquery.
+ *
+ * The rel's targetlist and restrictinfo list must have been constructed
+ * already, and the Paths for the subquery must have been completed.
+ * We look at the subquery's PlannerInfo to extract data.
+ *
+ * We set the same fields as set_baserel_size_estimates.
+ */
+void
+set_subquery_size_estimates(PlannerInfo *root, RelOptInfo *rel)
+{
+ PlannerInfo *subroot = rel->subroot;
+ RelOptInfo *sub_final_rel;
+ ListCell *lc;
+
+ /* Should only be applied to base relations that are subqueries */
+ Assert(rel->relid > 0);
+ Assert(planner_rt_fetch(rel->relid, root)->rtekind == RTE_SUBQUERY);
+
+ /*
+ * Copy raw number of output rows from subquery. All of its paths should
+ * have the same output rowcount, so just look at cheapest-total.
+ */
+ sub_final_rel = fetch_upper_rel(subroot, UPPERREL_FINAL, NULL);
+ rel->tuples = sub_final_rel->cheapest_total_path->rows;
+
+ /*
+ * Compute per-output-column width estimates by examining the subquery's
+ * targetlist. For any output that is a plain Var, get the width estimate
+ * that was made while planning the subquery. Otherwise, we leave it to
+ * set_rel_width to fill in a datatype-based default estimate.
+ */
+ foreach(lc, subroot->parse->targetList)
+ {
+ TargetEntry *te = lfirst_node(TargetEntry, lc);
+ Node *texpr = (Node *) te->expr;
+ int32 item_width = 0;
+
+ /* junk columns aren't visible to upper query */
+ if (te->resjunk)
+ continue;
+
+ /*
+ * The subquery could be an expansion of a view that's had columns
+ * added to it since the current query was parsed, so that there are
+ * non-junk tlist columns in it that don't correspond to any column
+ * visible at our query level. Ignore such columns.
+ */
+ if (te->resno < rel->min_attr || te->resno > rel->max_attr)
+ continue;
+
+ /*
+ * XXX This currently doesn't work for subqueries containing set
+ * operations, because the Vars in their tlists are bogus references
+ * to the first leaf subquery, which wouldn't give the right answer
+ * even if we could still get to its PlannerInfo.
+ *
+ * Also, the subquery could be an appendrel for which all branches are
+ * known empty due to constraint exclusion, in which case
+ * set_append_rel_pathlist will have left the attr_widths set to zero.
+ *
+ * In either case, we just leave the width estimate zero until
+ * set_rel_width fixes it.
+ */
+ if (IsA(texpr, Var) &&
+ subroot->parse->setOperations == NULL)
+ {
+ Var *var = (Var *) texpr;
+ RelOptInfo *subrel = find_base_rel(subroot, var->varno);
+
+ item_width = subrel->attr_widths[var->varattno - subrel->min_attr];
+ }
+ rel->attr_widths[te->resno - rel->min_attr] = item_width;
+ }
+
+ /* Now estimate number of output rows, etc */
+ set_baserel_size_estimates(root, rel);
+}
+
+/*
+ * set_function_size_estimates
+ * Set the size estimates for a base relation that is a function call.
+ *
+ * The rel's targetlist and restrictinfo list must have been constructed
+ * already.
+ *
+ * We set the same fields as set_baserel_size_estimates.
+ */
+void
+set_function_size_estimates(PlannerInfo *root, RelOptInfo *rel)
+{
+ RangeTblEntry *rte;
+ ListCell *lc;
+
+ /* Should only be applied to base relations that are functions */
+ Assert(rel->relid > 0);
+ rte = planner_rt_fetch(rel->relid, root);
+ Assert(rte->rtekind == RTE_FUNCTION);
+
+ /*
+ * Estimate number of rows the functions will return. The rowcount of the
+ * node is that of the largest function result.
+ */
+ rel->tuples = 0;
+ foreach(lc, rte->functions)
+ {
+ RangeTblFunction *rtfunc = (RangeTblFunction *) lfirst(lc);
+ double ntup = expression_returns_set_rows(root, rtfunc->funcexpr);
+
+ if (ntup > rel->tuples)
+ rel->tuples = ntup;
+ }
+
+ /* Now estimate number of output rows, etc */
+ set_baserel_size_estimates(root, rel);
+}
+
+/*
+ * set_tablefunc_size_estimates
+ *		Set the size estimates for a base relation that is a tablefunc call.
+ *
+ * The rel's targetlist and restrictinfo list must have been constructed
+ * already.
+ *
+ * We set the same fields as set_baserel_size_estimates.
+ */
+void
+set_tablefunc_size_estimates(PlannerInfo *root, RelOptInfo *rel)
+{
+	/* Should only be applied to base relations that are tablefunc references */
+ Assert(rel->relid > 0);
+ Assert(planner_rt_fetch(rel->relid, root)->rtekind == RTE_TABLEFUNC);
+
+ rel->tuples = 100;
+
+ /* Now estimate number of output rows, etc */
+ set_baserel_size_estimates(root, rel);
+}
+
+/*
+ * set_values_size_estimates
+ * Set the size estimates for a base relation that is a values list.
+ *
+ * The rel's targetlist and restrictinfo list must have been constructed
+ * already.
+ *
+ * We set the same fields as set_baserel_size_estimates.
+ */
+void
+set_values_size_estimates(PlannerInfo *root, RelOptInfo *rel)
+{
+ RangeTblEntry *rte;
+
+ /* Should only be applied to base relations that are values lists */
+ Assert(rel->relid > 0);
+ rte = planner_rt_fetch(rel->relid, root);
+ Assert(rte->rtekind == RTE_VALUES);
+
+ /*
+ * Estimate number of rows the values list will return. We know this
+ * precisely based on the list length (well, barring set-returning
+ * functions in list items, but that's a refinement not catered for
+ * anywhere else either).
+ */
+ rel->tuples = list_length(rte->values_lists);
+
+ /* Now estimate number of output rows, etc */
+ set_baserel_size_estimates(root, rel);
+}
+
+/*
+ * set_cte_size_estimates
+ * Set the size estimates for a base relation that is a CTE reference.
+ *
+ * The rel's targetlist and restrictinfo list must have been constructed
+ * already, and we need an estimate of the number of rows returned by the CTE
+ * (if a regular CTE) or the non-recursive term (if a self-reference).
+ *
+ * We set the same fields as set_baserel_size_estimates.
+ */
+void
+set_cte_size_estimates(PlannerInfo *root, RelOptInfo *rel, double cte_rows)
+{
+ RangeTblEntry *rte;
+
+ /* Should only be applied to base relations that are CTE references */
+ Assert(rel->relid > 0);
+ rte = planner_rt_fetch(rel->relid, root);
+ Assert(rte->rtekind == RTE_CTE);
+
+ if (rte->self_reference)
+ {
+ /*
+ * In a self-reference, we assume the average worktable size is a
+ * multiple of the nonrecursive term's size. The best multiplier will
+ * vary depending on query "fan-out", so make its value adjustable.
+ */
+ rel->tuples = clamp_row_est(recursive_worktable_factor * cte_rows);
+ }
+ else
+ {
+ /* Otherwise just believe the CTE's rowcount estimate */
+ rel->tuples = cte_rows;
+ }
+
+ /* Now estimate number of output rows, etc */
+ set_baserel_size_estimates(root, rel);
+}
+
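+/*
+ * Worked example: for a recursive CTE whose non-recursive term is
+ * estimated at cte_rows = 100, and assuming recursive_worktable_factor is
+ * set to 10,
+ *
+ *     rel->tuples = clamp_row_est(10 * 100) = 1000
+ *
+ * A non-self-reference simply inherits the CTE's own rowcount estimate.
+ */
+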
+/*
+ * set_namedtuplestore_size_estimates
+ * Set the size estimates for a base relation that is a tuplestore reference.
+ *
+ * The rel's targetlist and restrictinfo list must have been constructed
+ * already.
+ *
+ * We set the same fields as set_baserel_size_estimates.
+ */
+void
+set_namedtuplestore_size_estimates(PlannerInfo *root, RelOptInfo *rel)
+{
+ RangeTblEntry *rte;
+
+ /* Should only be applied to base relations that are tuplestore references */
+ Assert(rel->relid > 0);
+ rte = planner_rt_fetch(rel->relid, root);
+ Assert(rte->rtekind == RTE_NAMEDTUPLESTORE);
+
+ /*
+ * Use the estimate provided by the code which is generating the named
+ * tuplestore. In some cases, the actual number might be available; in
+ * others the same plan will be re-used, so a "typical" value might be
+ * estimated and used.
+ */
+ rel->tuples = rte->enrtuples;
+ if (rel->tuples < 0)
+ rel->tuples = 1000;
+
+ /* Now estimate number of output rows, etc */
+ set_baserel_size_estimates(root, rel);
+}
+
+/*
+ * set_result_size_estimates
+ * Set the size estimates for an RTE_RESULT base relation
+ *
+ * The rel's targetlist and restrictinfo list must have been constructed
+ * already.
+ *
+ * We set the same fields as set_baserel_size_estimates.
+ */
+void
+set_result_size_estimates(PlannerInfo *root, RelOptInfo *rel)
+{
+ /* Should only be applied to RTE_RESULT base relations */
+ Assert(rel->relid > 0);
+ Assert(planner_rt_fetch(rel->relid, root)->rtekind == RTE_RESULT);
+
+ /* RTE_RESULT always generates a single row, natively */
+ rel->tuples = 1;
+
+ /* Now estimate number of output rows, etc */
+ set_baserel_size_estimates(root, rel);
+}
+
+/*
+ * set_foreign_size_estimates
+ * Set the size estimates for a base relation that is a foreign table.
+ *
+ * There is not a whole lot that we can do here; the foreign-data wrapper
+ * is responsible for producing useful estimates. We can do a decent job
+ * of estimating baserestrictcost, so we set that, and we also set up width
+ * using what will be purely datatype-driven estimates from the targetlist.
+ * There is no way to do anything sane with the rows value, so we just put
+ * a default estimate and hope that the wrapper can improve on it. The
+ * wrapper's GetForeignRelSize function will be called momentarily.
+ *
+ * The rel's targetlist and restrictinfo list must have been constructed
+ * already.
+ */
+void
+set_foreign_size_estimates(PlannerInfo *root, RelOptInfo *rel)
+{
+ /* Should only be applied to base relations */
+ Assert(rel->relid > 0);
+
+ rel->rows = 1000; /* entirely bogus default estimate */
+
+ cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo, root);
+
+ set_rel_width(root, rel);
+}
+
+
+/*
+ * set_rel_width
+ * Set the estimated output width of a base relation.
+ *
+ * The estimated output width is the sum of the per-attribute width estimates
+ * for the actually-referenced columns, plus any PHVs or other expressions
+ * that have to be calculated at this relation. This is the amount of data
+ * we'd need to pass upwards in case of a sort, hash, etc.
+ *
+ * This function also sets reltarget->cost, so it's a bit misnamed now.
+ *
+ * NB: this works best on plain relations because it prefers to look at
+ * real Vars. For subqueries, set_subquery_size_estimates will already have
+ * copied up whatever per-column estimates were made within the subquery,
+ * and for other types of rels there isn't much we can do anyway. We fall
+ * back on (fairly stupid) datatype-based width estimates if we can't get
+ * any better number.
+ *
+ * The per-attribute width estimates are cached for possible re-use while
+ * building join relations or post-scan/join pathtargets.
+ */
+static void
+set_rel_width(PlannerInfo *root, RelOptInfo *rel)
+{
+ Oid reloid = planner_rt_fetch(rel->relid, root)->relid;
+ int32 tuple_width = 0;
+ bool have_wholerow_var = false;
+ ListCell *lc;
+
+ /* Vars are assumed to have cost zero, but other exprs do not */
+ rel->reltarget->cost.startup = 0;
+ rel->reltarget->cost.per_tuple = 0;
+
+ foreach(lc, rel->reltarget->exprs)
+ {
+ Node *node = (Node *) lfirst(lc);
+
+ /*
+ * Ordinarily, a Var in a rel's targetlist must belong to that rel;
+ * but there are corner cases involving LATERAL references where that
+ * isn't so. If the Var has the wrong varno, fall through to the
+ * generic case (it doesn't seem worth the trouble to be any smarter).
+ */
+ if (IsA(node, Var) &&
+ ((Var *) node)->varno == rel->relid)
+ {
+ Var *var = (Var *) node;
+ int ndx;
+ int32 item_width;
+
+ Assert(var->varattno >= rel->min_attr);
+ Assert(var->varattno <= rel->max_attr);
+
+ ndx = var->varattno - rel->min_attr;
+
+ /*
+ * If it's a whole-row Var, we'll deal with it below after we have
+ * already cached as many attr widths as possible.
+ */
+ if (var->varattno == 0)
+ {
+ have_wholerow_var = true;
+ continue;
+ }
+
+ /*
+ * The width may have been cached already (especially if it's a
+ * subquery), so don't duplicate effort.
+ */
+ if (rel->attr_widths[ndx] > 0)
+ {
+ tuple_width += rel->attr_widths[ndx];
+ continue;
+ }
+
+ /* Try to get column width from statistics */
+ if (reloid != InvalidOid && var->varattno > 0)
+ {
+ item_width = get_attavgwidth(reloid, var->varattno);
+ if (item_width > 0)
+ {
+ rel->attr_widths[ndx] = item_width;
+ tuple_width += item_width;
+ continue;
+ }
+ }
+
+ /*
+ * Not a plain relation, or can't find statistics for it. Estimate
+ * using just the type info.
+ */
+ item_width = get_typavgwidth(var->vartype, var->vartypmod);
+ Assert(item_width > 0);
+ rel->attr_widths[ndx] = item_width;
+ tuple_width += item_width;
+ }
+ else if (IsA(node, PlaceHolderVar))
+ {
+ /*
+ * We will need to evaluate the PHV's contained expression while
+ * scanning this rel, so be sure to include it in reltarget->cost.
+ */
+ PlaceHolderVar *phv = (PlaceHolderVar *) node;
+ PlaceHolderInfo *phinfo = find_placeholder_info(root, phv, false);
+ QualCost cost;
+
+ tuple_width += phinfo->ph_width;
+ cost_qual_eval_node(&cost, (Node *) phv->phexpr, root);
+ rel->reltarget->cost.startup += cost.startup;
+ rel->reltarget->cost.per_tuple += cost.per_tuple;
+ }
+ else
+ {
+ /*
+ * We could be looking at an expression pulled up from a subquery,
+ * or a ROW() representing a whole-row child Var, etc. Do what we
+ * can using the expression type information.
+ */
+ int32 item_width;
+ QualCost cost;
+
+ item_width = get_typavgwidth(exprType(node), exprTypmod(node));
+ Assert(item_width > 0);
+ tuple_width += item_width;
+ /* Not entirely clear if we need to account for cost, but do so */
+ cost_qual_eval_node(&cost, node, root);
+ rel->reltarget->cost.startup += cost.startup;
+ rel->reltarget->cost.per_tuple += cost.per_tuple;
+ }
+ }
+
+ /*
+ * If we have a whole-row reference, estimate its width as the sum of
+ * per-column widths plus heap tuple header overhead.
+ */
+ if (have_wholerow_var)
+ {
+ int32 wholerow_width = MAXALIGN(SizeofHeapTupleHeader);
+
+ if (reloid != InvalidOid)
+ {
+ /* Real relation, so estimate true tuple width */
+ wholerow_width += get_relation_data_width(reloid,
+ rel->attr_widths - rel->min_attr);
+ }
+ else
+ {
+ /* Do what we can with info for a phony rel */
+ AttrNumber i;
+
+ for (i = 1; i <= rel->max_attr; i++)
+ wholerow_width += rel->attr_widths[i - rel->min_attr];
+ }
+
+ rel->attr_widths[0 - rel->min_attr] = wholerow_width;
+
+ /*
+ * Include the whole-row Var as part of the output tuple. Yes, that
+ * really is what happens at runtime.
+ */
+ tuple_width += wholerow_width;
+ }
+
+ Assert(tuple_width >= 0);
+ rel->reltarget->width = tuple_width;
+}
+
+/*
+ * set_pathtarget_cost_width
+ * Set the estimated eval cost and output width of a PathTarget tlist.
+ *
+ * As a notational convenience, returns the same PathTarget pointer passed in.
+ *
+ * Most, though not quite all, uses of this function occur after we've run
+ * set_rel_width() for base relations; so we can usually obtain cached width
+ * estimates for Vars. If we can't, fall back on datatype-based width
+ * estimates. Present early-planning uses of PathTargets don't need accurate
+ * widths badly enough to justify going to the catalogs for better data.
+ */
+PathTarget *
+set_pathtarget_cost_width(PlannerInfo *root, PathTarget *target)
+{
+ int32 tuple_width = 0;
+ ListCell *lc;
+
+ /* Vars are assumed to have cost zero, but other exprs do not */
+ target->cost.startup = 0;
+ target->cost.per_tuple = 0;
+
+ foreach(lc, target->exprs)
+ {
+ Node *node = (Node *) lfirst(lc);
+
+ if (IsA(node, Var))
+ {
+ Var *var = (Var *) node;
+ int32 item_width;
+
+ /* We should not see any upper-level Vars here */
+ Assert(var->varlevelsup == 0);
+
+ /* Try to get data from RelOptInfo cache */
+ if (!IS_SPECIAL_VARNO(var->varno) &&
+ var->varno < root->simple_rel_array_size)
+ {
+ RelOptInfo *rel = root->simple_rel_array[var->varno];
+
+ if (rel != NULL &&
+ var->varattno >= rel->min_attr &&
+ var->varattno <= rel->max_attr)
+ {
+ int ndx = var->varattno - rel->min_attr;
+
+ if (rel->attr_widths[ndx] > 0)
+ {
+ tuple_width += rel->attr_widths[ndx];
+ continue;
+ }
+ }
+ }
+
+ /*
+ * No cached data available, so estimate using just the type info.
+ */
+ item_width = get_typavgwidth(var->vartype, var->vartypmod);
+ Assert(item_width > 0);
+ tuple_width += item_width;
+ }
+ else
+ {
+ /*
+ * Handle general expressions using type info.
+ */
+ int32 item_width;
+ QualCost cost;
+
+ item_width = get_typavgwidth(exprType(node), exprTypmod(node));
+ Assert(item_width > 0);
+ tuple_width += item_width;
+
+ /* Account for cost, too */
+ cost_qual_eval_node(&cost, node, root);
+ target->cost.startup += cost.startup;
+ target->cost.per_tuple += cost.per_tuple;
+ }
+ }
+
+ Assert(tuple_width >= 0);
+ target->width = tuple_width;
+
+ return target;
+}
+
+/*
+ * relation_byte_size
+ * Estimate the storage space in bytes for a given number of tuples
+ * of a given width (size in bytes).
+ */
+static double
+relation_byte_size(double tuples, int width)
+{
+ return tuples * (MAXALIGN(width) + MAXALIGN(SizeofHeapTupleHeader));
+}
+
+/*
+ * page_size
+ * Returns an estimate of the number of pages covered by a given
+ * number of tuples of a given width (size in bytes).
+ */
+static double
+page_size(double tuples, int width)
+{
+ return ceil(relation_byte_size(tuples, width) / BLCKSZ);
+}
+
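+/*
+ * Worked example, assuming 8-byte MAXALIGN and 8kB blocks: 10000 tuples of
+ * width 44 are estimated at
+ *
+ *     10000 * (MAXALIGN(44) + MAXALIGN(23)) = 10000 * 72 = 720000 bytes
+ *     ceil(720000 / 8192)                   = 88 pages
+ */
+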
+/*
+ * Estimate the fraction of the work that each worker will do given the
+ * number of workers budgeted for the path.
+ */
+static double
+get_parallel_divisor(Path *path)
+{
+ double parallel_divisor = path->parallel_workers;
+
+ /*
+ * Early experience with parallel query suggests that when there is only
+ * one worker, the leader often makes a very substantial contribution to
+ * executing the parallel portion of the plan, but as more workers are
+ * added, it does less and less, because it's busy reading tuples from the
+ * workers and doing whatever non-parallel post-processing is needed. By
+ * the time we reach 4 workers, the leader no longer makes a meaningful
+ * contribution. Thus, for now, estimate that the leader spends 30% of
+ * its time servicing each worker, and the remainder executing the
+ * parallel plan.
+ */
+ if (parallel_leader_participation)
+ {
+ double leader_contribution;
+
+ leader_contribution = 1.0 - (0.3 * path->parallel_workers);
+ if (leader_contribution > 0)
+ parallel_divisor += leader_contribution;
+ }
+
+ return parallel_divisor;
+}
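
In other words, each planned worker is assumed to consume 30% of the leader's time, and once four or more workers are planned the leader contributes nothing to the parallel portion. The standalone sketch below just replays that arithmetic for one through four workers, assuming parallel_leader_participation is enabled; it is an illustration, not backend code.

#include <stdio.h>

/* Mirror of the divisor arithmetic described above, leader participating. */
static double divisor_with_leader(int workers)
{
    double divisor = workers;
    double leader_contribution = 1.0 - 0.3 * workers;

    if (leader_contribution > 0)
        divisor += leader_contribution;
    return divisor;
}

int main(void)
{
    /* 1 worker -> 1.7, 2 -> 2.4, 3 -> 3.1, 4 -> 4.0 (leader drops out) */
    for (int w = 1; w <= 4; w++)
        printf("%d workers: divisor = %.1f\n", w, divisor_with_leader(w));
    return 0;
}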
+
+/*
+ * compute_bitmap_pages
+ *
+ * compute number of pages fetched from heap in bitmap heap scan.
+ */
+double
+compute_bitmap_pages(PlannerInfo *root, RelOptInfo *baserel, Path *bitmapqual,
+ int loop_count, Cost *cost, double *tuple)
+{
+ Cost indexTotalCost;
+ Selectivity indexSelectivity;
+ double T;
+ double pages_fetched;
+ double tuples_fetched;
+ double heap_pages;
+ long maxentries;
+
+ /*
+ * Fetch total cost of obtaining the bitmap, as well as its total
+ * selectivity.
+ */
+ cost_bitmap_tree_node(bitmapqual, &indexTotalCost, &indexSelectivity);
+
+ /*
+ * Estimate number of main-table pages fetched.
+ */
+ tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
+
+ T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;
+
+ /*
+ * For a single scan, the number of heap pages that need to be fetched is
+ * the same as the Mackert and Lohman formula for the case T <= b (ie, no
+ * re-reads needed).
+ */
+ pages_fetched = (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
+
+ /*
+ * Calculate the number of pages fetched from the heap. Then based on
+ * current work_mem estimate get the estimated maxentries in the bitmap.
+ * (Note that we always do this calculation based on the number of pages
+ * that would be fetched in a single iteration, even if loop_count > 1.
+ * That's correct, because only that number of entries will be stored in
+ * the bitmap at one time.)
+ */
+ heap_pages = Min(pages_fetched, baserel->pages);
+ maxentries = tbm_calculate_entries(work_mem * 1024L);
+
+ if (loop_count > 1)
+ {
+ /*
+ * For repeated bitmap scans, scale up the number of tuples fetched in
+ * the Mackert and Lohman formula by the number of scans, so that we
+ * estimate the number of pages fetched by all the scans. Then
+ * pro-rate for one scan.
+ */
+ pages_fetched = index_pages_fetched(tuples_fetched * loop_count,
+ baserel->pages,
+ get_indexpath_pages(bitmapqual),
+ root);
+ pages_fetched /= loop_count;
+ }
+
+ if (pages_fetched >= T)
+ pages_fetched = T;
+ else
+ pages_fetched = ceil(pages_fetched);
+
+ if (maxentries < heap_pages)
+ {
+ double exact_pages;
+ double lossy_pages;
+
+ /*
+ * Crude approximation of the number of lossy pages. Because of the
+ * way tbm_lossify() is coded, the number of lossy pages increases
+ * very sharply as soon as we run short of memory; this formula has
+ * that property and seems to perform adequately in testing, but it's
+ * possible we could do better somehow.
+ */
+ lossy_pages = Max(0, heap_pages - maxentries / 2);
+ exact_pages = heap_pages - lossy_pages;
+
+ /*
+ * If there are lossy pages then recompute the number of tuples
+ * processed by the bitmap heap node. We assume here that the chance
+ * of a given tuple coming from an exact page is the same as the
+ * chance that a given page is exact. This might not be true, but
+ * it's not clear how we can do any better.
+ */
+ if (lossy_pages > 0)
+ tuples_fetched =
+ clamp_row_est(indexSelectivity *
+ (exact_pages / heap_pages) * baserel->tuples +
+ (lossy_pages / heap_pages) * baserel->tuples);
+ }
+
+ if (cost)
+ *cost = indexTotalCost;
+ if (tuple)
+ *tuple = tuples_fetched;
+
+ return pages_fetched;
+}
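
To make the estimate above concrete, the following standalone sketch evaluates the Mackert-Lohman no-reread formula and the crude lossy-page split on made-up inputs; the table size, rows fetched, and bitmap entry limit below are illustrative assumptions, not values the planner would compute.

#include <math.h>
#include <stdio.h>

int main(void)
{
    double T = 10000.0;             /* assumed heap pages in the table */
    double tuples_fetched = 5000.0; /* assumed rows selected by the index quals */
    double maxentries = 1000.0;     /* assumed bitmap capacity from work_mem */

    /* Mackert-Lohman, no-reread case: pages = 2*T*t / (2*T + t) */
    double pages_fetched = (2.0 * T * tuples_fetched) /
                           (2.0 * T + tuples_fetched);
    double heap_pages = fmin(pages_fetched, T);

    /* Crude lossy-page split applied when the bitmap must be lossified */
    double lossy_pages = fmax(0.0, heap_pages - maxentries / 2.0);
    double exact_pages = heap_pages - lossy_pages;

    /* With these inputs: 4000 pages fetched, 500 exact, 3500 lossy */
    printf("pages_fetched = %.0f, exact = %.0f, lossy = %.0f\n",
           ceil(pages_fetched), exact_pages, lossy_pages);
    return 0;
}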
diff --git a/src/backend/optimizer/path/equivclass.c b/src/backend/optimizer/path/equivclass.c
new file mode 100644
index 0000000..9f39f46
--- /dev/null
+++ b/src/backend/optimizer/path/equivclass.c
@@ -0,0 +1,3226 @@
+/*-------------------------------------------------------------------------
+ *
+ * equivclass.c
+ * Routines for managing EquivalenceClasses
+ *
+ * See src/backend/optimizer/README for discussion of EquivalenceClasses.
+ *
+ *
+ * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * IDENTIFICATION
+ * src/backend/optimizer/path/equivclass.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include <limits.h>
+
+#include "access/stratnum.h"
+#include "catalog/pg_type.h"
+#include "nodes/makefuncs.h"
+#include "nodes/nodeFuncs.h"
+#include "optimizer/appendinfo.h"
+#include "optimizer/clauses.h"
+#include "optimizer/optimizer.h"
+#include "optimizer/pathnode.h"
+#include "optimizer/paths.h"
+#include "optimizer/planmain.h"
+#include "optimizer/restrictinfo.h"
+#include "utils/lsyscache.h"
+
+
+static EquivalenceMember *add_eq_member(EquivalenceClass *ec,
+ Expr *expr, Relids relids, Relids nullable_relids,
+ bool is_child, Oid datatype);
+static bool is_exprlist_member(Expr *node, List *exprs);
+static void generate_base_implied_equalities_const(PlannerInfo *root,
+ EquivalenceClass *ec);
+static void generate_base_implied_equalities_no_const(PlannerInfo *root,
+ EquivalenceClass *ec);
+static void generate_base_implied_equalities_broken(PlannerInfo *root,
+ EquivalenceClass *ec);
+static List *generate_join_implied_equalities_normal(PlannerInfo *root,
+ EquivalenceClass *ec,
+ Relids join_relids,
+ Relids outer_relids,
+ Relids inner_relids);
+static List *generate_join_implied_equalities_broken(PlannerInfo *root,
+ EquivalenceClass *ec,
+ Relids nominal_join_relids,
+ Relids outer_relids,
+ Relids nominal_inner_relids,
+ RelOptInfo *inner_rel);
+static Oid select_equality_operator(EquivalenceClass *ec,
+ Oid lefttype, Oid righttype);
+static RestrictInfo *create_join_clause(PlannerInfo *root,
+ EquivalenceClass *ec, Oid opno,
+ EquivalenceMember *leftem,
+ EquivalenceMember *rightem,
+ EquivalenceClass *parent_ec);
+static bool reconsider_outer_join_clause(PlannerInfo *root,
+ RestrictInfo *rinfo,
+ bool outer_on_left);
+static bool reconsider_full_join_clause(PlannerInfo *root,
+ RestrictInfo *rinfo);
+static Bitmapset *get_eclass_indexes_for_relids(PlannerInfo *root,
+ Relids relids);
+static Bitmapset *get_common_eclass_indexes(PlannerInfo *root, Relids relids1,
+ Relids relids2);
+
+
+/*
+ * process_equivalence
+ * The given clause has a mergejoinable operator and can be applied without
+ * any delay by an outer join, so its two sides can be considered equal
+ * anywhere they are both computable; moreover that equality can be
+ * extended transitively. Record this knowledge in the EquivalenceClass
+ * data structure, if applicable. Returns true if successful, false if not
+ * (in which case caller should treat the clause as ordinary, not an
+ * equivalence).
+ *
+ * In some cases, although we cannot convert a clause into EquivalenceClass
+ * knowledge, we can still modify it to a more useful form than the original.
+ * Then, *p_restrictinfo will be replaced by a new RestrictInfo, which is what
+ * the caller should use for further processing.
+ *
+ * If below_outer_join is true, then the clause was found below the nullable
+ * side of an outer join, so its sides might validly be both NULL rather than
+ * strictly equal. We can still deduce equalities in such cases, but we take
+ * care to mark an EquivalenceClass if it came from any such clauses. Also,
+ * we have to check that both sides are either pseudo-constants or strict
+ * functions of Vars, else they might not both go to NULL above the outer
+ * join. (This is the main reason why we need a failure return. It's more
+ * convenient to check this case here than at the call sites...)
+ *
+ * We also reject proposed equivalence clauses if they contain leaky functions
+ * and have security_level above zero. The EC evaluation rules require us to
+ * apply certain tests at certain joining levels, and we can't tolerate
+ * delaying any test on security_level grounds. By rejecting candidate clauses
+ * that might require security delays, we ensure it's safe to apply an EC
+ * clause as soon as it's supposed to be applied.
+ *
+ * On success return, we have also initialized the clause's left_ec/right_ec
+ * fields to point to the EquivalenceClass representing it. This saves lookup
+ * effort later.
+ *
+ * Note: constructing merged EquivalenceClasses is a standard UNION-FIND
+ * problem, for which there exist better data structures than simple lists.
+ * If this code ever proves to be a bottleneck then it could be sped up ---
+ * but for now, simple is beautiful.
+ *
+ * Note: this is only called during planner startup, not during GEQO
+ * exploration, so we need not worry about whether we're in the right
+ * memory context.
+ */
+bool
+process_equivalence(PlannerInfo *root,
+ RestrictInfo **p_restrictinfo,
+ bool below_outer_join)
+{
+ RestrictInfo *restrictinfo = *p_restrictinfo;
+ Expr *clause = restrictinfo->clause;
+ Oid opno,
+ collation,
+ item1_type,
+ item2_type;
+ Expr *item1;
+ Expr *item2;
+ Relids item1_relids,
+ item2_relids,
+ item1_nullable_relids,
+ item2_nullable_relids;
+ List *opfamilies;
+ EquivalenceClass *ec1,
+ *ec2;
+ EquivalenceMember *em1,
+ *em2;
+ ListCell *lc1;
+ int ec2_idx;
+
+ /* Should not already be marked as having generated an eclass */
+ Assert(restrictinfo->left_ec == NULL);
+ Assert(restrictinfo->right_ec == NULL);
+
+ /* Reject if it is potentially postponable by security considerations */
+ if (restrictinfo->security_level > 0 && !restrictinfo->leakproof)
+ return false;
+
+ /* Extract info from given clause */
+ Assert(is_opclause(clause));
+ opno = ((OpExpr *) clause)->opno;
+ collation = ((OpExpr *) clause)->inputcollid;
+ item1 = (Expr *) get_leftop(clause);
+ item2 = (Expr *) get_rightop(clause);
+ item1_relids = restrictinfo->left_relids;
+ item2_relids = restrictinfo->right_relids;
+
+ /*
+ * Ensure both input expressions expose the desired collation (their types
+ * should be OK already); see comments for canonicalize_ec_expression.
+ */
+ item1 = canonicalize_ec_expression(item1,
+ exprType((Node *) item1),
+ collation);
+ item2 = canonicalize_ec_expression(item2,
+ exprType((Node *) item2),
+ collation);
+
+ /*
+ * Clauses of the form X=X cannot be translated into EquivalenceClasses.
+ * We'd either end up with a single-entry EC, losing the knowledge that
+ * the clause was present at all, or else make an EC with duplicate
+ * entries, causing other issues.
+ */
+ if (equal(item1, item2))
+ {
+ /*
+ * If the operator is strict, then the clause can be treated as just
+ * "X IS NOT NULL". (Since we know we are considering a top-level
+ * qual, we can ignore the difference between FALSE and NULL results.)
+ * It's worth making the conversion because we'll typically get a much
+ * better selectivity estimate than we would for X=X.
+ *
+ * If the operator is not strict, we can't be sure what it will do
+ * with NULLs, so don't attempt to optimize it.
+ */
+ set_opfuncid((OpExpr *) clause);
+ if (func_strict(((OpExpr *) clause)->opfuncid))
+ {
+ NullTest *ntest = makeNode(NullTest);
+
+ ntest->arg = item1;
+ ntest->nulltesttype = IS_NOT_NULL;
+ ntest->argisrow = false; /* correct even if composite arg */
+ ntest->location = -1;
+
+ *p_restrictinfo =
+ make_restrictinfo(root,
+ (Expr *) ntest,
+ restrictinfo->is_pushed_down,
+ restrictinfo->outerjoin_delayed,
+ restrictinfo->pseudoconstant,
+ restrictinfo->security_level,
+ NULL,
+ restrictinfo->outer_relids,
+ restrictinfo->nullable_relids);
+ }
+ return false;
+ }
+
+ /*
+ * If below outer join, check for strictness, else reject.
+ */
+ if (below_outer_join)
+ {
+ if (!bms_is_empty(item1_relids) &&
+ contain_nonstrict_functions((Node *) item1))
+ return false; /* LHS is non-strict but not constant */
+ if (!bms_is_empty(item2_relids) &&
+ contain_nonstrict_functions((Node *) item2))
+ return false; /* RHS is non-strict but not constant */
+ }
+
+ /* Calculate nullable-relid sets for each side of the clause */
+ item1_nullable_relids = bms_intersect(item1_relids,
+ restrictinfo->nullable_relids);
+ item2_nullable_relids = bms_intersect(item2_relids,
+ restrictinfo->nullable_relids);
+
+ /*
+ * We use the declared input types of the operator, not exprType() of the
+ * inputs, as the nominal datatypes for opfamily lookup. This presumes
+ * that btree operators are always registered with amoplefttype and
+ * amoprighttype equal to their declared input types. We will need this
+ * info anyway to build EquivalenceMember nodes, and by extracting it now
+ * we can use type comparisons to short-circuit some equal() tests.
+ */
+ op_input_types(opno, &item1_type, &item2_type);
+
+ opfamilies = restrictinfo->mergeopfamilies;
+
+ /*
+ * Sweep through the existing EquivalenceClasses looking for matches to
+ * item1 and item2. These are the possible outcomes:
+ *
+ * 1. We find both in the same EC. The equivalence is already known, so
+ * there's nothing to do.
+ *
+ * 2. We find both in different ECs. Merge the two ECs together.
+ *
+ * 3. We find just one. Add the other to its EC.
+ *
+ * 4. We find neither. Make a new, two-entry EC.
+ *
+ * Note: since all ECs are built through this process or the similar
+ * search in get_eclass_for_sort_expr(), it's impossible that we'd match
+ * an item in more than one existing nonvolatile EC. So it's okay to stop
+ * at the first match.
+ */
+ ec1 = ec2 = NULL;
+ em1 = em2 = NULL;
+ ec2_idx = -1;
+ foreach(lc1, root->eq_classes)
+ {
+ EquivalenceClass *cur_ec = (EquivalenceClass *) lfirst(lc1);
+ ListCell *lc2;
+
+ /* Never match to a volatile EC */
+ if (cur_ec->ec_has_volatile)
+ continue;
+
+ /*
+ * The collation has to match; check this first since it's cheaper
+ * than the opfamily comparison.
+ */
+ if (collation != cur_ec->ec_collation)
+ continue;
+
+ /*
+ * A "match" requires matching sets of btree opfamilies. Use of
+ * equal() for this test has implications discussed in the comments
+ * for get_mergejoin_opfamilies().
+ */
+ if (!equal(opfamilies, cur_ec->ec_opfamilies))
+ continue;
+
+ foreach(lc2, cur_ec->ec_members)
+ {
+ EquivalenceMember *cur_em = (EquivalenceMember *) lfirst(lc2);
+
+ Assert(!cur_em->em_is_child); /* no children yet */
+
+ /*
+ * If below an outer join, don't match constants: they're not as
+ * constant as they look.
+ */
+ if ((below_outer_join || cur_ec->ec_below_outer_join) &&
+ cur_em->em_is_const)
+ continue;
+
+ if (!ec1 &&
+ item1_type == cur_em->em_datatype &&
+ equal(item1, cur_em->em_expr))
+ {
+ ec1 = cur_ec;
+ em1 = cur_em;
+ if (ec2)
+ break;
+ }
+
+ if (!ec2 &&
+ item2_type == cur_em->em_datatype &&
+ equal(item2, cur_em->em_expr))
+ {
+ ec2 = cur_ec;
+ ec2_idx = foreach_current_index(lc1);
+ em2 = cur_em;
+ if (ec1)
+ break;
+ }
+ }
+
+ if (ec1 && ec2)
+ break;
+ }
+
+ /* Sweep finished, what did we find? */
+
+ if (ec1 && ec2)
+ {
+ /* If case 1, nothing to do, except add to sources */
+ if (ec1 == ec2)
+ {
+ ec1->ec_sources = lappend(ec1->ec_sources, restrictinfo);
+ ec1->ec_below_outer_join |= below_outer_join;
+ ec1->ec_min_security = Min(ec1->ec_min_security,
+ restrictinfo->security_level);
+ ec1->ec_max_security = Max(ec1->ec_max_security,
+ restrictinfo->security_level);
+ /* mark the RI as associated with this eclass */
+ restrictinfo->left_ec = ec1;
+ restrictinfo->right_ec = ec1;
+ /* mark the RI as usable with this pair of EMs */
+ restrictinfo->left_em = em1;
+ restrictinfo->right_em = em2;
+ return true;
+ }
+
+ /*
+ * Case 2: need to merge ec1 and ec2. This should never happen after
+ * the ECs have reached canonical state; otherwise, pathkeys could be
+ * rendered non-canonical by the merge, and relation eclass indexes
+ * would get broken by removal of an eq_classes list entry.
+ */
+ if (root->ec_merging_done)
+ elog(ERROR, "too late to merge equivalence classes");
+
+ /*
+ * We add ec2's items to ec1, then set ec2's ec_merged link to point
+ * to ec1 and remove ec2 from the eq_classes list. We cannot simply
+ * delete ec2 because that could leave dangling pointers in existing
+ * PathKeys. We leave it behind with a link so that the merged EC can
+ * be found.
+ */
+ ec1->ec_members = list_concat(ec1->ec_members, ec2->ec_members);
+ ec1->ec_sources = list_concat(ec1->ec_sources, ec2->ec_sources);
+ ec1->ec_derives = list_concat(ec1->ec_derives, ec2->ec_derives);
+ ec1->ec_relids = bms_join(ec1->ec_relids, ec2->ec_relids);
+ ec1->ec_has_const |= ec2->ec_has_const;
+ /* can't need to set has_volatile */
+ ec1->ec_below_outer_join |= ec2->ec_below_outer_join;
+ ec1->ec_min_security = Min(ec1->ec_min_security,
+ ec2->ec_min_security);
+ ec1->ec_max_security = Max(ec1->ec_max_security,
+ ec2->ec_max_security);
+ ec2->ec_merged = ec1;
+ root->eq_classes = list_delete_nth_cell(root->eq_classes, ec2_idx);
+ /* just to avoid debugging confusion w/ dangling pointers: */
+ ec2->ec_members = NIL;
+ ec2->ec_sources = NIL;
+ ec2->ec_derives = NIL;
+ ec2->ec_relids = NULL;
+ ec1->ec_sources = lappend(ec1->ec_sources, restrictinfo);
+ ec1->ec_below_outer_join |= below_outer_join;
+ ec1->ec_min_security = Min(ec1->ec_min_security,
+ restrictinfo->security_level);
+ ec1->ec_max_security = Max(ec1->ec_max_security,
+ restrictinfo->security_level);
+ /* mark the RI as associated with this eclass */
+ restrictinfo->left_ec = ec1;
+ restrictinfo->right_ec = ec1;
+ /* mark the RI as usable with this pair of EMs */
+ restrictinfo->left_em = em1;
+ restrictinfo->right_em = em2;
+ }
+ else if (ec1)
+ {
+ /* Case 3: add item2 to ec1 */
+ em2 = add_eq_member(ec1, item2, item2_relids, item2_nullable_relids,
+ false, item2_type);
+ ec1->ec_sources = lappend(ec1->ec_sources, restrictinfo);
+ ec1->ec_below_outer_join |= below_outer_join;
+ ec1->ec_min_security = Min(ec1->ec_min_security,
+ restrictinfo->security_level);
+ ec1->ec_max_security = Max(ec1->ec_max_security,
+ restrictinfo->security_level);
+ /* mark the RI as associated with this eclass */
+ restrictinfo->left_ec = ec1;
+ restrictinfo->right_ec = ec1;
+ /* mark the RI as usable with this pair of EMs */
+ restrictinfo->left_em = em1;
+ restrictinfo->right_em = em2;
+ }
+ else if (ec2)
+ {
+ /* Case 3: add item1 to ec2 */
+ em1 = add_eq_member(ec2, item1, item1_relids, item1_nullable_relids,
+ false, item1_type);
+ ec2->ec_sources = lappend(ec2->ec_sources, restrictinfo);
+ ec2->ec_below_outer_join |= below_outer_join;
+ ec2->ec_min_security = Min(ec2->ec_min_security,
+ restrictinfo->security_level);
+ ec2->ec_max_security = Max(ec2->ec_max_security,
+ restrictinfo->security_level);
+ /* mark the RI as associated with this eclass */
+ restrictinfo->left_ec = ec2;
+ restrictinfo->right_ec = ec2;
+ /* mark the RI as usable with this pair of EMs */
+ restrictinfo->left_em = em1;
+ restrictinfo->right_em = em2;
+ }
+ else
+ {
+ /* Case 4: make a new, two-entry EC */
+ EquivalenceClass *ec = makeNode(EquivalenceClass);
+
+ ec->ec_opfamilies = opfamilies;
+ ec->ec_collation = collation;
+ ec->ec_members = NIL;
+ ec->ec_sources = list_make1(restrictinfo);
+ ec->ec_derives = NIL;
+ ec->ec_relids = NULL;
+ ec->ec_has_const = false;
+ ec->ec_has_volatile = false;
+ ec->ec_below_outer_join = below_outer_join;
+ ec->ec_broken = false;
+ ec->ec_sortref = 0;
+ ec->ec_min_security = restrictinfo->security_level;
+ ec->ec_max_security = restrictinfo->security_level;
+ ec->ec_merged = NULL;
+ em1 = add_eq_member(ec, item1, item1_relids, item1_nullable_relids,
+ false, item1_type);
+ em2 = add_eq_member(ec, item2, item2_relids, item2_nullable_relids,
+ false, item2_type);
+
+ root->eq_classes = lappend(root->eq_classes, ec);
+
+ /* mark the RI as associated with this eclass */
+ restrictinfo->left_ec = ec;
+ restrictinfo->right_ec = ec;
+ /* mark the RI as usable with this pair of EMs */
+ restrictinfo->left_em = em1;
+ restrictinfo->right_em = em2;
+ }
+
+ return true;
+}
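
The four outcomes listed in the comment above are easiest to see on a toy example. The sketch below is not backend code: it treats EC members as plain strings and walks a single toy EC through case 4 (build a new two-member EC for "a.x = b.y"), then case 3 (add "c.z" when "b.y = c.z" arrives), then case 1 (nothing to do for "a.x = c.z"); case 2, merging two existing ECs, is omitted for brevity. The helper names are hypothetical.

#include <stdio.h>
#include <string.h>

#define MAX_MEMBERS 8

/* A toy equivalence class: just a bag of member names. */
struct toy_ec
{
    const char *members[MAX_MEMBERS];
    int         nmembers;
};

static int ec_has(const struct toy_ec *ec, const char *m)
{
    for (int i = 0; i < ec->nmembers; i++)
        if (strcmp(ec->members[i], m) == 0)
            return 1;
    return 0;
}

/* Process "left = right" against a single EC, mimicking cases 1, 3 and 4. */
static void toy_process_equivalence(struct toy_ec *ec,
                                    const char *left, const char *right)
{
    int have_l = ec_has(ec, left);
    int have_r = ec_has(ec, right);

    if (have_l && have_r)
        printf("case 1: %s = %s already known\n", left, right);
    else if (have_l || have_r)
    {
        ec->members[ec->nmembers++] = have_l ? right : left;
        printf("case 3: added %s\n", have_l ? right : left);
    }
    else
    {
        ec->members[ec->nmembers++] = left;
        ec->members[ec->nmembers++] = right;
        printf("case 4: new EC {%s, %s}\n", left, right);
    }
}

int main(void)
{
    struct toy_ec ec = {{0}, 0};

    toy_process_equivalence(&ec, "a.x", "b.y");  /* case 4 */
    toy_process_equivalence(&ec, "b.y", "c.z");  /* case 3 */
    toy_process_equivalence(&ec, "a.x", "c.z");  /* case 1 */
    return 0;
}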
+
+/*
+ * canonicalize_ec_expression
+ *
+ * This function ensures that the expression exposes the expected type and
+ * collation, so that it will be equal() to other equivalence-class expressions
+ * that it ought to be equal() to.
+ *
+ * The rule for datatypes is that the exposed type should match what it would
+ * be for an input to an operator of the EC's opfamilies; which is usually
+ * the declared input type of the operator, but in the case of polymorphic
+ * operators no relabeling is wanted (compare the behavior of parse_coerce.c).
+ * Expressions coming in from quals will generally have the right type
+ * already, but expressions coming from indexkeys may not (because they are
+ * represented without any explicit relabel in pg_index), and the same problem
+ * occurs for sort expressions (because the parser is likewise cavalier about
+ * putting relabels on them). Such cases will be binary-compatible with the
+ * real operators, so adding a RelabelType is sufficient.
+ *
+ * Also, the expression's exposed collation must match the EC's collation.
+ * This is important because in comparisons like "foo < bar COLLATE baz",
+ * only one of the expressions has the correct exposed collation as we receive
+ * it from the parser. Forcing both of them to have it ensures that all
+ * variant spellings of such a construct behave the same. Again, we can
+ * stick on a RelabelType to force the right exposed collation. (It might
+ * work to not label the collation at all in EC members, but this is risky
+ * since some parts of the system expect exprCollation() to deliver the
+ * right answer for a sort key.)
+ */
+Expr *
+canonicalize_ec_expression(Expr *expr, Oid req_type, Oid req_collation)
+{
+ Oid expr_type = exprType((Node *) expr);
+
+ /*
+ * For a polymorphic-input-type opclass, just keep the same exposed type.
+ * RECORD opclasses work like polymorphic-type ones for this purpose.
+ */
+ if (IsPolymorphicType(req_type) || req_type == RECORDOID)
+ req_type = expr_type;
+
+ /*
+ * No work if the expression exposes the right type/collation already.
+ */
+ if (expr_type != req_type ||
+ exprCollation((Node *) expr) != req_collation)
+ {
+ /*
+ * If we have to change the type of the expression, set typmod to -1,
+ * since the new type may not have the same typmod interpretation.
+ * When we only have to change collation, preserve the exposed typmod.
+ */
+ int32 req_typmod;
+
+ if (expr_type != req_type)
+ req_typmod = -1;
+ else
+ req_typmod = exprTypmod((Node *) expr);
+
+ /*
+ * Use applyRelabelType so that we preserve const-flatness. This is
+ * important since eval_const_expressions has already been applied.
+ */
+ expr = (Expr *) applyRelabelType((Node *) expr,
+ req_type, req_typmod, req_collation,
+ COERCE_IMPLICIT_CAST, -1, false);
+ }
+
+ return expr;
+}
+
+/*
+ * add_eq_member - build a new EquivalenceMember and add it to an EC
+ */
+static EquivalenceMember *
+add_eq_member(EquivalenceClass *ec, Expr *expr, Relids relids,
+ Relids nullable_relids, bool is_child, Oid datatype)
+{
+ EquivalenceMember *em = makeNode(EquivalenceMember);
+
+ em->em_expr = expr;
+ em->em_relids = relids;
+ em->em_nullable_relids = nullable_relids;
+ em->em_is_const = false;
+ em->em_is_child = is_child;
+ em->em_datatype = datatype;
+
+ if (bms_is_empty(relids))
+ {
+ /*
+ * No Vars, assume it's a pseudoconstant. This is correct for entries
+ * generated from process_equivalence(), because a WHERE clause can't
+ * contain aggregates or SRFs, and non-volatility was checked before
+ * process_equivalence() ever got called. But
+ * get_eclass_for_sort_expr() has to work harder. We put the tests
+ * there not here to save cycles in the equivalence case.
+ */
+ Assert(!is_child);
+ em->em_is_const = true;
+ ec->ec_has_const = true;
+ /* it can't affect ec_relids */
+ }
+ else if (!is_child) /* child members don't add to ec_relids */
+ {
+ ec->ec_relids = bms_add_members(ec->ec_relids, relids);
+ }
+ ec->ec_members = lappend(ec->ec_members, em);
+
+ return em;
+}
+
+
+/*
+ * get_eclass_for_sort_expr
+ * Given an expression and opfamily/collation info, find an existing
+ * equivalence class it is a member of; if none, optionally build a new
+ * single-member EquivalenceClass for it.
+ *
+ * expr is the expression, and nullable_relids is the set of base relids
+ * that are potentially nullable below it. We actually only care about
+ * the set of such relids that are used in the expression; but for caller
+ * convenience, we perform that intersection step here. The caller need
+ * only be sure that nullable_relids doesn't omit any nullable rels that
+ * might appear in the expr.
+ *
+ * sortref is the SortGroupRef of the originating SortGroupClause, if any,
+ * or zero if not. (It should never be zero if the expression is volatile!)
+ *
+ * If rel is not NULL, it identifies a specific relation we're considering
+ * a path for, and indicates that child EC members for that relation can be
+ * considered. Otherwise child members are ignored. (Note: since child EC
+ * members aren't guaranteed unique, a non-NULL value means that there could
+ * be more than one EC that matches the expression; if so it's order-dependent
+ * which one you get. This is annoying but it only happens in corner cases,
+ * so for now we live with just reporting the first match. See also
+ * generate_implied_equalities_for_column and match_pathkeys_to_index.)
+ *
+ * If create_it is true, we'll build a new EquivalenceClass when there is no
+ * match. If create_it is false, we just return NULL when no match.
+ *
+ * This can be used safely both before and after EquivalenceClass merging;
+ * since it never causes merging it does not invalidate any existing ECs
+ * or PathKeys. However, ECs added after path generation has begun are
+ * of limited usefulness, so usually it's best to create them beforehand.
+ *
+ * Note: opfamilies must be chosen consistently with the way
+ * process_equivalence() would do; that is, generated from a mergejoinable
+ * equality operator. Else we might fail to detect valid equivalences,
+ * generating poor (but not incorrect) plans.
+ */
+EquivalenceClass *
+get_eclass_for_sort_expr(PlannerInfo *root,
+ Expr *expr,
+ Relids nullable_relids,
+ List *opfamilies,
+ Oid opcintype,
+ Oid collation,
+ Index sortref,
+ Relids rel,
+ bool create_it)
+{
+ Relids expr_relids;
+ EquivalenceClass *newec;
+ EquivalenceMember *newem;
+ ListCell *lc1;
+ MemoryContext oldcontext;
+
+ /*
+ * Ensure the expression exposes the correct type and collation.
+ */
+ expr = canonicalize_ec_expression(expr, opcintype, collation);
+
+ /*
+ * Scan through the existing EquivalenceClasses for a match
+ */
+ foreach(lc1, root->eq_classes)
+ {
+ EquivalenceClass *cur_ec = (EquivalenceClass *) lfirst(lc1);
+ ListCell *lc2;
+
+ /*
+ * Never match to a volatile EC, except when we are looking at another
+ * reference to the same volatile SortGroupClause.
+ */
+ if (cur_ec->ec_has_volatile &&
+ (sortref == 0 || sortref != cur_ec->ec_sortref))
+ continue;
+
+ if (collation != cur_ec->ec_collation)
+ continue;
+ if (!equal(opfamilies, cur_ec->ec_opfamilies))
+ continue;
+
+ foreach(lc2, cur_ec->ec_members)
+ {
+ EquivalenceMember *cur_em = (EquivalenceMember *) lfirst(lc2);
+
+ /*
+ * Ignore child members unless they match the request.
+ */
+ if (cur_em->em_is_child &&
+ !bms_equal(cur_em->em_relids, rel))
+ continue;
+
+ /*
+ * If below an outer join, don't match constants: they're not as
+ * constant as they look.
+ */
+ if (cur_ec->ec_below_outer_join &&
+ cur_em->em_is_const)
+ continue;
+
+ if (opcintype == cur_em->em_datatype &&
+ equal(expr, cur_em->em_expr))
+ return cur_ec; /* Match! */
+ }
+ }
+
+ /* No match; does caller want a NULL result? */
+ if (!create_it)
+ return NULL;
+
+ /*
+ * OK, build a new single-member EC
+ *
+ * Here, we must be sure that we construct the EC in the right context.
+ */
+ oldcontext = MemoryContextSwitchTo(root->planner_cxt);
+
+ newec = makeNode(EquivalenceClass);
+ newec->ec_opfamilies = list_copy(opfamilies);
+ newec->ec_collation = collation;
+ newec->ec_members = NIL;
+ newec->ec_sources = NIL;
+ newec->ec_derives = NIL;
+ newec->ec_relids = NULL;
+ newec->ec_has_const = false;
+ newec->ec_has_volatile = contain_volatile_functions((Node *) expr);
+ newec->ec_below_outer_join = false;
+ newec->ec_broken = false;
+ newec->ec_sortref = sortref;
+ newec->ec_min_security = UINT_MAX;
+ newec->ec_max_security = 0;
+ newec->ec_merged = NULL;
+
+ if (newec->ec_has_volatile && sortref == 0) /* should not happen */
+ elog(ERROR, "volatile EquivalenceClass has no sortref");
+
+ /*
+ * Get the precise set of nullable relids appearing in the expression.
+ */
+ expr_relids = pull_varnos(root, (Node *) expr);
+ nullable_relids = bms_intersect(nullable_relids, expr_relids);
+
+ newem = add_eq_member(newec, copyObject(expr), expr_relids,
+ nullable_relids, false, opcintype);
+
+ /*
+ * add_eq_member doesn't check for volatile functions, set-returning
+ * functions, aggregates, or window functions, but such could appear in
+ * sort expressions; so we have to check whether its const-marking was
+ * correct.
+ */
+ if (newec->ec_has_const)
+ {
+ if (newec->ec_has_volatile ||
+ expression_returns_set((Node *) expr) ||
+ contain_agg_clause((Node *) expr) ||
+ contain_window_function((Node *) expr))
+ {
+ newec->ec_has_const = false;
+ newem->em_is_const = false;
+ }
+ }
+
+ root->eq_classes = lappend(root->eq_classes, newec);
+
+ /*
+ * If EC merging is already complete, we have to mop up by adding the new
+ * EC to the eclass_indexes of the relation(s) mentioned in it.
+ */
+ if (root->ec_merging_done)
+ {
+ int ec_index = list_length(root->eq_classes) - 1;
+ int i = -1;
+
+ while ((i = bms_next_member(newec->ec_relids, i)) > 0)
+ {
+ RelOptInfo *rel = root->simple_rel_array[i];
+
+ Assert(rel->reloptkind == RELOPT_BASEREL ||
+ rel->reloptkind == RELOPT_DEADREL);
+
+ rel->eclass_indexes = bms_add_member(rel->eclass_indexes,
+ ec_index);
+ }
+ }
+
+ MemoryContextSwitchTo(oldcontext);
+
+ return newec;
+}
+
+/*
+ * find_ec_member_matching_expr
+ * Locate an EquivalenceClass member matching the given expr, if any;
+ * return NULL if no match.
+ *
+ * "Matching" is defined as "equal after stripping RelabelTypes".
+ * This is used for identifying sort expressions, and we need to allow
+ * binary-compatible relabeling for some cases involving binary-compatible
+ * sort operators.
+ *
+ * Child EC members are ignored unless they belong to given 'relids'.
+ */
+EquivalenceMember *
+find_ec_member_matching_expr(EquivalenceClass *ec,
+ Expr *expr,
+ Relids relids)
+{
+ ListCell *lc;
+
+ /* We ignore binary-compatible relabeling on both ends */
+ while (expr && IsA(expr, RelabelType))
+ expr = ((RelabelType *) expr)->arg;
+
+ foreach(lc, ec->ec_members)
+ {
+ EquivalenceMember *em = (EquivalenceMember *) lfirst(lc);
+ Expr *emexpr;
+
+ /*
+ * We shouldn't be trying to sort by an equivalence class that
+ * contains a constant, so no need to consider such cases any further.
+ */
+ if (em->em_is_const)
+ continue;
+
+ /*
+ * Ignore child members unless they belong to the requested rel.
+ */
+ if (em->em_is_child &&
+ !bms_is_subset(em->em_relids, relids))
+ continue;
+
+ /*
+ * Match if same expression (after stripping relabel).
+ */
+ emexpr = em->em_expr;
+ while (emexpr && IsA(emexpr, RelabelType))
+ emexpr = ((RelabelType *) emexpr)->arg;
+
+ if (equal(emexpr, expr))
+ return em;
+ }
+
+ return NULL;
+}
+
+/*
+ * find_computable_ec_member
+ * Locate an EquivalenceClass member that can be computed from the
+ * expressions appearing in "exprs"; return NULL if no match.
+ *
+ * "exprs" can be either a list of bare expression trees, or a list of
+ * TargetEntry nodes. Either way, it should contain Vars and possibly
+ * Aggrefs and WindowFuncs, which are matched to the corresponding elements
+ * of the EquivalenceClass's expressions.
+ *
+ * Unlike find_ec_member_matching_expr, there's no special provision here
+ * for binary-compatible relabeling. This is intentional: if we have to
+ * compute an expression in this way, setrefs.c is going to insist on exact
+ * matches of Vars to the source tlist.
+ *
+ * Child EC members are ignored unless they belong to given 'relids'.
+ * Also, non-parallel-safe expressions are ignored if 'require_parallel_safe'.
+ *
+ * Note: some callers pass root == NULL for notational reasons. This is OK
+ * when require_parallel_safe is false.
+ */
+EquivalenceMember *
+find_computable_ec_member(PlannerInfo *root,
+ EquivalenceClass *ec,
+ List *exprs,
+ Relids relids,
+ bool require_parallel_safe)
+{
+ ListCell *lc;
+
+ foreach(lc, ec->ec_members)
+ {
+ EquivalenceMember *em = (EquivalenceMember *) lfirst(lc);
+ List *exprvars;
+ ListCell *lc2;
+
+ /*
+ * We shouldn't be trying to sort by an equivalence class that
+ * contains a constant, so no need to consider such cases any further.
+ */
+ if (em->em_is_const)
+ continue;
+
+ /*
+ * Ignore child members unless they belong to the requested rel.
+ */
+ if (em->em_is_child &&
+ !bms_is_subset(em->em_relids, relids))
+ continue;
+
+ /*
+ * Match if all Vars and quasi-Vars are available in "exprs".
+ */
+ exprvars = pull_var_clause((Node *) em->em_expr,
+ PVC_INCLUDE_AGGREGATES |
+ PVC_INCLUDE_WINDOWFUNCS |
+ PVC_INCLUDE_PLACEHOLDERS);
+ foreach(lc2, exprvars)
+ {
+ if (!is_exprlist_member(lfirst(lc2), exprs))
+ break;
+ }
+ list_free(exprvars);
+ if (lc2)
+ continue; /* we hit a non-available Var */
+
+ /*
+ * If requested, reject expressions that are not parallel-safe. We
+ * check this last because it's a rather expensive test.
+ */
+ if (require_parallel_safe &&
+ !is_parallel_safe(root, (Node *) em->em_expr))
+ continue;
+
+ return em; /* found usable expression */
+ }
+
+ return NULL;
+}
+
+/*
+ * is_exprlist_member
+ * Subroutine for find_computable_ec_member: is "node" in "exprs"?
+ *
+ * Per the requirements of that function, "exprs" might or might not have
+ * TargetEntry superstructure.
+ */
+static bool
+is_exprlist_member(Expr *node, List *exprs)
+{
+ ListCell *lc;
+
+ foreach(lc, exprs)
+ {
+ Expr *expr = (Expr *) lfirst(lc);
+
+ if (expr && IsA(expr, TargetEntry))
+ expr = ((TargetEntry *) expr)->expr;
+
+ if (equal(node, expr))
+ return true;
+ }
+ return false;
+}
+
+/*
+ * relation_can_be_sorted_early
+ * Can this relation be sorted on this EC before the final output step?
+ *
+ * To succeed, we must find an EC member that prepare_sort_from_pathkeys knows
+ * how to sort on, given the rel's reltarget as input. There are also a few
+ * additional constraints based on the fact that the desired sort will be done
+ * "early", within the scan/join part of the plan. Also, non-parallel-safe
+ * expressions are ignored if 'require_parallel_safe'.
+ *
+ * At some point we might want to return the identified EquivalenceMember,
+ * but for now, callers only want to know if there is one.
+ */
+bool
+relation_can_be_sorted_early(PlannerInfo *root, RelOptInfo *rel,
+ EquivalenceClass *ec, bool require_parallel_safe)
+{
+ PathTarget *target = rel->reltarget;
+ EquivalenceMember *em;
+ ListCell *lc;
+
+ /*
+ * Reject volatile ECs immediately; such sorts must always be postponed.
+ */
+ if (ec->ec_has_volatile)
+ return false;
+
+ /*
+ * Try to find an EM directly matching some reltarget member.
+ */
+ foreach(lc, target->exprs)
+ {
+ Expr *targetexpr = (Expr *) lfirst(lc);
+
+ em = find_ec_member_matching_expr(ec, targetexpr, rel->relids);
+ if (!em)
+ continue;
+
+ /*
+ * Reject expressions involving set-returning functions, as those
+ * can't be computed early either. (Note: this test and the following
+ * one are effectively checking properties of targetexpr, so there's
+ * no point in asking whether some other EC member would be better.)
+ */
+ if (expression_returns_set((Node *) em->em_expr))
+ continue;
+
+ /*
+ * If requested, reject expressions that are not parallel-safe. We
+ * check this last because it's a rather expensive test.
+ */
+ if (require_parallel_safe &&
+ !is_parallel_safe(root, (Node *) em->em_expr))
+ continue;
+
+ return true;
+ }
+
+ /*
+ * Try to find an expression computable from the reltarget.
+ */
+ em = find_computable_ec_member(root, ec, target->exprs, rel->relids,
+ require_parallel_safe);
+ if (!em)
+ return false;
+
+ /*
+ * Reject expressions involving set-returning functions, as those can't be
+ * computed early either. (There's no point in looking for another EC
+ * member in this case; since SRFs can't appear in WHERE, they cannot
+ * belong to multi-member ECs.)
+ */
+ if (expression_returns_set((Node *) em->em_expr))
+ return false;
+
+ return true;
+}
+
+/*
+ * generate_base_implied_equalities
+ * Generate any restriction clauses that we can deduce from equivalence
+ * classes.
+ *
+ * When an EC contains pseudoconstants, our strategy is to generate
+ * "member = const1" clauses where const1 is the first constant member, for
+ * every other member (including other constants). If we are able to do this
+ * then we don't need any "var = var" comparisons because we've successfully
+ * constrained all the vars at their points of creation. If we fail to
+ * generate any of these clauses due to lack of cross-type operators, we fall
+ * back to the "ec_broken" strategy described below. (XXX if there are
+ * multiple constants of different types, it's possible that we might succeed
+ * in forming all the required clauses if we started from a different const
+ * member; but this seems a sufficiently hokey corner case to not be worth
+ * spending lots of cycles on.)
+ *
+ * For ECs that contain no pseudoconstants, we generate derived clauses
+ * "member1 = member2" for each pair of members belonging to the same base
+ * relation (actually, if there are more than two for the same base relation,
+ * we only need enough clauses to link each to each other). This provides
+ * the base case for the recursion: each row emitted by a base relation scan
+ * will constrain all computable members of the EC to be equal. As each
+ * join path is formed, we'll add additional derived clauses on-the-fly
+ * to maintain this invariant (see generate_join_implied_equalities).
+ *
+ * If the opfamilies used by the EC do not provide complete sets of cross-type
+ * equality operators, it is possible that we will fail to generate a clause
+ * that must be generated to maintain the invariant. (An example: given
+ * "WHERE a.x = b.y AND b.y = a.z", the scheme breaks down if we cannot
+ * generate "a.x = a.z" as a restriction clause for A.) In this case we mark
+ * the EC "ec_broken" and fall back to regurgitating its original source
+ * RestrictInfos at appropriate times. We do not try to retract any derived
+ * clauses already generated from the broken EC, so the resulting plan could
+ * be poor due to bad selectivity estimates caused by redundant clauses. But
+ * the correct solution to that is to fix the opfamilies ...
+ *
+ * Equality clauses derived by this function are passed off to
+ * process_implied_equality (in plan/initsplan.c) to be inserted into the
+ * restrictinfo datastructures. Note that this must be called after initial
+ * scanning of the quals and before Path construction begins.
+ *
+ * We make no attempt to avoid generating duplicate RestrictInfos here: we
+ * don't search ec_sources or ec_derives for matches. It doesn't really
+ * seem worth the trouble to do so.
+ */
+void
+generate_base_implied_equalities(PlannerInfo *root)
+{
+ int ec_index;
+ ListCell *lc;
+
+ /*
+ * At this point, we're done absorbing knowledge of equivalences in the
+ * query, so no further EC merging should happen, and ECs remaining in the
+ * eq_classes list can be considered canonical. (But note that it's still
+ * possible for new single-member ECs to be added through
+ * get_eclass_for_sort_expr().)
+ */
+ root->ec_merging_done = true;
+
+ ec_index = 0;
+ foreach(lc, root->eq_classes)
+ {
+ EquivalenceClass *ec = (EquivalenceClass *) lfirst(lc);
+ bool can_generate_joinclause = false;
+ int i;
+
+ Assert(ec->ec_merged == NULL); /* else shouldn't be in list */
+ Assert(!ec->ec_broken); /* not yet anyway... */
+
+ /*
+ * Generate implied equalities that are restriction clauses.
+ * Single-member ECs won't generate any deductions, either here or at
+ * the join level.
+ */
+ if (list_length(ec->ec_members) > 1)
+ {
+ if (ec->ec_has_const)
+ generate_base_implied_equalities_const(root, ec);
+ else
+ generate_base_implied_equalities_no_const(root, ec);
+
+ /* Recover if we failed to generate required derived clauses */
+ if (ec->ec_broken)
+ generate_base_implied_equalities_broken(root, ec);
+
+ /* Detect whether this EC might generate join clauses */
+ can_generate_joinclause =
+ (bms_membership(ec->ec_relids) == BMS_MULTIPLE);
+ }
+
+ /*
+ * Mark the base rels cited in each eclass (which should all exist by
+ * now) with the eq_classes indexes of all eclasses mentioning them.
+ * This will let us avoid searching in subsequent lookups. While
+ * we're at it, we can mark base rels that have pending eclass joins;
+ * this is a cheap version of has_relevant_eclass_joinclause().
+ */
+ i = -1;
+ while ((i = bms_next_member(ec->ec_relids, i)) > 0)
+ {
+ RelOptInfo *rel = root->simple_rel_array[i];
+
+ Assert(rel->reloptkind == RELOPT_BASEREL);
+
+ rel->eclass_indexes = bms_add_member(rel->eclass_indexes,
+ ec_index);
+
+ if (can_generate_joinclause)
+ rel->has_eclass_joins = true;
+ }
+
+ ec_index++;
+ }
+}
+
+/*
+ * generate_base_implied_equalities when EC contains pseudoconstant(s)
+ */
+static void
+generate_base_implied_equalities_const(PlannerInfo *root,
+ EquivalenceClass *ec)
+{
+ EquivalenceMember *const_em = NULL;
+ ListCell *lc;
+
+ /*
+ * In the trivial case where we just had one "var = const" clause, push
+ * the original clause back into the main planner machinery. There is
+ * nothing to be gained by doing it differently, and we save the effort to
+ * re-build and re-analyze an equality clause that will be exactly
+ * equivalent to the old one.
+ */
+ if (list_length(ec->ec_members) == 2 &&
+ list_length(ec->ec_sources) == 1)
+ {
+ RestrictInfo *restrictinfo = (RestrictInfo *) linitial(ec->ec_sources);
+
+ if (bms_membership(restrictinfo->required_relids) != BMS_MULTIPLE)
+ {
+ distribute_restrictinfo_to_rels(root, restrictinfo);
+ return;
+ }
+ }
+
+ /*
+ * Find the constant member to use. We prefer an actual constant to
+ * pseudo-constants (such as Params), because the constraint exclusion
+ * machinery might be able to exclude relations on the basis of generated
+ * "var = const" equalities, but "var = param" won't work for that.
+ */
+ foreach(lc, ec->ec_members)
+ {
+ EquivalenceMember *cur_em = (EquivalenceMember *) lfirst(lc);
+
+ if (cur_em->em_is_const)
+ {
+ const_em = cur_em;
+ if (IsA(cur_em->em_expr, Const))
+ break;
+ }
+ }
+ Assert(const_em != NULL);
+
+ /* Generate a derived equality against each other member */
+ foreach(lc, ec->ec_members)
+ {
+ EquivalenceMember *cur_em = (EquivalenceMember *) lfirst(lc);
+ Oid eq_op;
+ RestrictInfo *rinfo;
+
+ Assert(!cur_em->em_is_child); /* no children yet */
+ if (cur_em == const_em)
+ continue;
+ eq_op = select_equality_operator(ec,
+ cur_em->em_datatype,
+ const_em->em_datatype);
+ if (!OidIsValid(eq_op))
+ {
+ /* failed... */
+ ec->ec_broken = true;
+ break;
+ }
+ rinfo = process_implied_equality(root, eq_op, ec->ec_collation,
+ cur_em->em_expr, const_em->em_expr,
+ bms_copy(ec->ec_relids),
+ bms_union(cur_em->em_nullable_relids,
+ const_em->em_nullable_relids),
+ ec->ec_min_security,
+ ec->ec_below_outer_join,
+ cur_em->em_is_const);
+
+ /*
+ * If the clause didn't degenerate to a constant, fill in the correct
+ * markings for a mergejoinable clause, and save it in ec_derives. (We
+ * will not re-use such clauses directly, but selectivity estimation
+ * may consult the list later. Note that this use of ec_derives does
+ * not overlap with its use for join clauses, since we never generate
+ * join clauses from an ec_has_const eclass.)
+ */
+ if (rinfo && rinfo->mergeopfamilies)
+ {
+ /* it's not redundant, so don't set parent_ec */
+ rinfo->left_ec = rinfo->right_ec = ec;
+ rinfo->left_em = cur_em;
+ rinfo->right_em = const_em;
+ ec->ec_derives = lappend(ec->ec_derives, rinfo);
+ }
+ }
+}
+
+/*
+ * generate_base_implied_equalities when EC contains no pseudoconstants
+ */
+static void
+generate_base_implied_equalities_no_const(PlannerInfo *root,
+ EquivalenceClass *ec)
+{
+ EquivalenceMember **prev_ems;
+ ListCell *lc;
+
+ /*
+ * We scan the EC members once and track the last-seen member for each
+ * base relation. When we see another member of the same base relation,
+ * we generate "prev_em = cur_em". This results in the minimum number of
+ * derived clauses, but it's possible that it will fail when a different
+ * ordering would succeed. XXX FIXME: use a UNION-FIND algorithm similar
+ * to the way we build merged ECs. (Use a list-of-lists for each rel.)
+ */
+ prev_ems = (EquivalenceMember **)
+ palloc0(root->simple_rel_array_size * sizeof(EquivalenceMember *));
+
+ foreach(lc, ec->ec_members)
+ {
+ EquivalenceMember *cur_em = (EquivalenceMember *) lfirst(lc);
+ int relid;
+
+ Assert(!cur_em->em_is_child); /* no children yet */
+ if (!bms_get_singleton_member(cur_em->em_relids, &relid))
+ continue;
+ Assert(relid < root->simple_rel_array_size);
+
+ if (prev_ems[relid] != NULL)
+ {
+ EquivalenceMember *prev_em = prev_ems[relid];
+ Oid eq_op;
+ RestrictInfo *rinfo;
+
+ eq_op = select_equality_operator(ec,
+ prev_em->em_datatype,
+ cur_em->em_datatype);
+ if (!OidIsValid(eq_op))
+ {
+ /* failed... */
+ ec->ec_broken = true;
+ break;
+ }
+ rinfo = process_implied_equality(root, eq_op, ec->ec_collation,
+ prev_em->em_expr, cur_em->em_expr,
+ bms_copy(ec->ec_relids),
+ bms_union(prev_em->em_nullable_relids,
+ cur_em->em_nullable_relids),
+ ec->ec_min_security,
+ ec->ec_below_outer_join,
+ false);
+
+ /*
+ * If the clause didn't degenerate to a constant, fill in the
+ * correct markings for a mergejoinable clause. We don't put it
+ * in ec_derives however; we don't currently need to re-find such
+ * clauses, and we don't want to clutter that list with non-join
+ * clauses.
+ */
+ if (rinfo && rinfo->mergeopfamilies)
+ {
+ /* it's not redundant, so don't set parent_ec */
+ rinfo->left_ec = rinfo->right_ec = ec;
+ rinfo->left_em = prev_em;
+ rinfo->right_em = cur_em;
+ }
+ }
+ prev_ems[relid] = cur_em;
+ }
+
+ pfree(prev_ems);
+
+ /*
+ * We also have to make sure that all the Vars used in the member clauses
+ * will be available at any join node we might try to reference them at.
+ * For the moment we force all the Vars to be available at all join nodes
+ * for this eclass. Perhaps this could be improved by doing some
+ * pre-analysis of which members we prefer to join, but it's no worse than
+ * what happened in the pre-8.3 code.
+ */
+ foreach(lc, ec->ec_members)
+ {
+ EquivalenceMember *cur_em = (EquivalenceMember *) lfirst(lc);
+ List *vars = pull_var_clause((Node *) cur_em->em_expr,
+ PVC_RECURSE_AGGREGATES |
+ PVC_RECURSE_WINDOWFUNCS |
+ PVC_INCLUDE_PLACEHOLDERS);
+
+ add_vars_to_targetlist(root, vars, ec->ec_relids, false);
+ list_free(vars);
+ }
+}
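
The strategy above links same-relation members into a chain rather than a clique: if an EC's members for relation a are a.x, a.z and a.w in list order, it derives "a.x = a.z" and "a.z = a.w" but not the redundant "a.x = a.w". A toy sketch of that chaining follows (the member names and clause printing are illustrative only, not planner output):

#include <stdio.h>

int main(void)
{
    /* Assume these three EC members all belong to the same base rel. */
    const char *members[] = {"a.x", "a.z", "a.w"};
    const char *prev = NULL;

    /* Chain each member to the previously seen one, as the code above does. */
    for (int i = 0; i < 3; i++)
    {
        if (prev != NULL)
            printf("derived restriction: %s = %s\n", prev, members[i]);
        prev = members[i];
    }
    return 0;
}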
+
+/*
+ * generate_base_implied_equalities cleanup after failure
+ *
+ * What we must do here is push any zero- or one-relation source RestrictInfos
+ * of the EC back into the main restrictinfo datastructures. Multi-relation
+ * clauses will be regurgitated later by generate_join_implied_equalities().
+ * (We do it this way to maintain continuity with the case that ec_broken
+ * becomes set only after we've gone up a join level or two.) However, for
+ * an EC that contains constants, we can adopt a simpler strategy and just
+ * throw back all the source RestrictInfos immediately; that works because
+ * we know that such an EC can't become broken later. (This rule justifies
+ * ignoring ec_has_const ECs in generate_join_implied_equalities, even when
+ * they are broken.)
+ */
+static void
+generate_base_implied_equalities_broken(PlannerInfo *root,
+ EquivalenceClass *ec)
+{
+ ListCell *lc;
+
+ foreach(lc, ec->ec_sources)
+ {
+ RestrictInfo *restrictinfo = (RestrictInfo *) lfirst(lc);
+
+ if (ec->ec_has_const ||
+ bms_membership(restrictinfo->required_relids) != BMS_MULTIPLE)
+ distribute_restrictinfo_to_rels(root, restrictinfo);
+ }
+}
+
+
+/*
+ * generate_join_implied_equalities
+ * Generate any join clauses that we can deduce from equivalence classes.
+ *
+ * At a join node, we must enforce restriction clauses sufficient to ensure
+ * that all equivalence-class members computable at that node are equal.
+ * Since the set of clauses to enforce can vary depending on which subset
+ * relations are the inputs, we have to compute this afresh for each join
+ * relation pair. Hence a fresh List of RestrictInfo nodes is built and
+ * passed back on each call.
+ *
+ * In addition to its use at join nodes, this can be applied to generate
+ * eclass-based join clauses for use in a parameterized scan of a base rel.
+ * The reason for the asymmetry of specifying the inner rel as a RelOptInfo
+ * and the outer rel by Relids is that this usage occurs before we have
+ * built any join RelOptInfos.
+ *
+ * An annoying special case for parameterized scans is that the inner rel can
+ * be an appendrel child (an "other rel"). In this case we must generate
+ * appropriate clauses using child EC members. add_child_rel_equivalences
+ * must already have been done for the child rel.
+ *
+ * The results are sufficient for use in merge, hash, and plain nestloop join
+ * methods. We do not worry here about selecting clauses that are optimal
+ * for use in a parameterized indexscan. indxpath.c makes its own selections
+ * of clauses to use, and if the ones we pick here are redundant with those,
+ * the extras will be eliminated at createplan time, using the parent_ec
+ * markers that we provide (see is_redundant_derived_clause()).
+ *
+ * Because the same join clauses are likely to be needed multiple times as
+ * we consider different join paths, we avoid generating multiple copies:
+ * whenever we select a particular pair of EquivalenceMembers to join,
+ * we check to see if the pair matches any original clause (in ec_sources)
+ * or previously-built clause (in ec_derives). This saves memory and allows
+ * re-use of information cached in RestrictInfos.
+ *
+ * join_relids should always equal bms_union(outer_relids, inner_rel->relids).
+ * We could simplify this function's API by computing it internally, but in
+ * most current uses, the caller has the value at hand anyway.
+ */
+List *
+generate_join_implied_equalities(PlannerInfo *root,
+ Relids join_relids,
+ Relids outer_relids,
+ RelOptInfo *inner_rel)
+{
+ List *result = NIL;
+ Relids inner_relids = inner_rel->relids;
+ Relids nominal_inner_relids;
+ Relids nominal_join_relids;
+ Bitmapset *matching_ecs;
+ int i;
+
+ /* If inner rel is a child, extra setup work is needed */
+ if (IS_OTHER_REL(inner_rel))
+ {
+ Assert(!bms_is_empty(inner_rel->top_parent_relids));
+
+ /* Fetch relid set for the topmost parent rel */
+ nominal_inner_relids = inner_rel->top_parent_relids;
+ /* ECs will be marked with the parent's relid, not the child's */
+ nominal_join_relids = bms_union(outer_relids, nominal_inner_relids);
+ }
+ else
+ {
+ nominal_inner_relids = inner_relids;
+ nominal_join_relids = join_relids;
+ }
+
+ /*
+ * Get all eclasses that mention both inner and outer sides of the join
+ */
+ matching_ecs = get_common_eclass_indexes(root, nominal_inner_relids,
+ outer_relids);
+
+ i = -1;
+ while ((i = bms_next_member(matching_ecs, i)) >= 0)
+ {
+ EquivalenceClass *ec = (EquivalenceClass *) list_nth(root->eq_classes, i);
+ List *sublist = NIL;
+
+ /* ECs containing consts do not need any further enforcement */
+ if (ec->ec_has_const)
+ continue;
+
+ /* Single-member ECs won't generate any deductions */
+ if (list_length(ec->ec_members) <= 1)
+ continue;
+
+ /* Sanity check that this eclass overlaps the join */
+ Assert(bms_overlap(ec->ec_relids, nominal_join_relids));
+
+ if (!ec->ec_broken)
+ sublist = generate_join_implied_equalities_normal(root,
+ ec,
+ join_relids,
+ outer_relids,
+ inner_relids);
+
+ /* Recover if we failed to generate required derived clauses */
+ if (ec->ec_broken)
+ sublist = generate_join_implied_equalities_broken(root,
+ ec,
+ nominal_join_relids,
+ outer_relids,
+ nominal_inner_relids,
+ inner_rel);
+
+ result = list_concat(result, sublist);
+ }
+
+ return result;
+}
+
+/*
+ * generate_join_implied_equalities_for_ecs
+ * As above, but consider only the listed ECs.
+ */
+List *
+generate_join_implied_equalities_for_ecs(PlannerInfo *root,
+ List *eclasses,
+ Relids join_relids,
+ Relids outer_relids,
+ RelOptInfo *inner_rel)
+{
+ List *result = NIL;
+ Relids inner_relids = inner_rel->relids;
+ Relids nominal_inner_relids;
+ Relids nominal_join_relids;
+ ListCell *lc;
+
+ /* If inner rel is a child, extra setup work is needed */
+ if (IS_OTHER_REL(inner_rel))
+ {
+ Assert(!bms_is_empty(inner_rel->top_parent_relids));
+
+ /* Fetch relid set for the topmost parent rel */
+ nominal_inner_relids = inner_rel->top_parent_relids;
+ /* ECs will be marked with the parent's relid, not the child's */
+ nominal_join_relids = bms_union(outer_relids, nominal_inner_relids);
+ }
+ else
+ {
+ nominal_inner_relids = inner_relids;
+ nominal_join_relids = join_relids;
+ }
+
+ foreach(lc, eclasses)
+ {
+ EquivalenceClass *ec = (EquivalenceClass *) lfirst(lc);
+ List *sublist = NIL;
+
+ /* ECs containing consts do not need any further enforcement */
+ if (ec->ec_has_const)
+ continue;
+
+ /* Single-member ECs won't generate any deductions */
+ if (list_length(ec->ec_members) <= 1)
+ continue;
+
+ /* We can quickly ignore any that don't overlap the join, too */
+ if (!bms_overlap(ec->ec_relids, nominal_join_relids))
+ continue;
+
+ if (!ec->ec_broken)
+ sublist = generate_join_implied_equalities_normal(root,
+ ec,
+ join_relids,
+ outer_relids,
+ inner_relids);
+
+ /* Recover if we failed to generate required derived clauses */
+ if (ec->ec_broken)
+ sublist = generate_join_implied_equalities_broken(root,
+ ec,
+ nominal_join_relids,
+ outer_relids,
+ nominal_inner_relids,
+ inner_rel);
+
+ result = list_concat(result, sublist);
+ }
+
+ return result;
+}
+
+/*
+ * generate_join_implied_equalities for a still-valid EC
+ */
+static List *
+generate_join_implied_equalities_normal(PlannerInfo *root,
+ EquivalenceClass *ec,
+ Relids join_relids,
+ Relids outer_relids,
+ Relids inner_relids)
+{
+ List *result = NIL;
+ List *new_members = NIL;
+ List *outer_members = NIL;
+ List *inner_members = NIL;
+ ListCell *lc1;
+
+ /*
+ * First, scan the EC to identify member values that are computable at the
+ * outer rel, at the inner rel, or at this relation but not in either
+ * input rel. The outer-rel members should already be enforced equal,
+ * likewise for the inner-rel members. We'll need to create clauses to
+ * enforce that any newly computable members are all equal to each other
+ * as well as to at least one input member, plus enforce at least one
+ * outer-rel member equal to at least one inner-rel member.
+ */
+ foreach(lc1, ec->ec_members)
+ {
+ EquivalenceMember *cur_em = (EquivalenceMember *) lfirst(lc1);
+
+ /*
+ * We don't need to check explicitly for child EC members. This test
+ * against join_relids will cause them to be ignored except when
+ * considering a child inner rel, which is what we want.
+ */
+ if (!bms_is_subset(cur_em->em_relids, join_relids))
+ continue; /* not computable yet, or wrong child */
+
+ if (bms_is_subset(cur_em->em_relids, outer_relids))
+ outer_members = lappend(outer_members, cur_em);
+ else if (bms_is_subset(cur_em->em_relids, inner_relids))
+ inner_members = lappend(inner_members, cur_em);
+ else
+ new_members = lappend(new_members, cur_em);
+ }
+
+ /*
+ * First, select the joinclause if needed. We can equate any one outer
+ * member to any one inner member, but we have to find a datatype
+ * combination for which an opfamily member operator exists. If we have
+ * choices, we prefer simple Var members (possibly with RelabelType) since
+ * these are (a) cheapest to compute at runtime and (b) most likely to
+ * have useful statistics. Also, prefer operators that are also
+ * hashjoinable.
+ */
+ if (outer_members && inner_members)
+ {
+ EquivalenceMember *best_outer_em = NULL;
+ EquivalenceMember *best_inner_em = NULL;
+ Oid best_eq_op = InvalidOid;
+ int best_score = -1;
+ RestrictInfo *rinfo;
+
+ foreach(lc1, outer_members)
+ {
+ EquivalenceMember *outer_em = (EquivalenceMember *) lfirst(lc1);
+ ListCell *lc2;
+
+ foreach(lc2, inner_members)
+ {
+ EquivalenceMember *inner_em = (EquivalenceMember *) lfirst(lc2);
+ Oid eq_op;
+ int score;
+
+ eq_op = select_equality_operator(ec,
+ outer_em->em_datatype,
+ inner_em->em_datatype);
+ if (!OidIsValid(eq_op))
+ continue;
+ score = 0;
+ if (IsA(outer_em->em_expr, Var) ||
+ (IsA(outer_em->em_expr, RelabelType) &&
+ IsA(((RelabelType *) outer_em->em_expr)->arg, Var)))
+ score++;
+ if (IsA(inner_em->em_expr, Var) ||
+ (IsA(inner_em->em_expr, RelabelType) &&
+ IsA(((RelabelType *) inner_em->em_expr)->arg, Var)))
+ score++;
+ if (op_hashjoinable(eq_op,
+ exprType((Node *) outer_em->em_expr)))
+ score++;
+ if (score > best_score)
+ {
+ best_outer_em = outer_em;
+ best_inner_em = inner_em;
+ best_eq_op = eq_op;
+ best_score = score;
+ if (best_score == 3)
+ break; /* no need to look further */
+ }
+ }
+ if (best_score == 3)
+ break; /* no need to look further */
+ }
+ if (best_score < 0)
+ {
+ /* failed... */
+ ec->ec_broken = true;
+ return NIL;
+ }
+
+ /*
+ * Create clause, setting parent_ec to mark it as redundant with other
+ * joinclauses
+ */
+ rinfo = create_join_clause(root, ec, best_eq_op,
+ best_outer_em, best_inner_em,
+ ec);
+
+ result = lappend(result, rinfo);
+ }
+
+ /*
+ * Now deal with building restrictions for any expressions that involve
+ * Vars from both sides of the join. We have to equate all of these to
+ * each other as well as to at least one old member (if any).
+ *
+ * XXX as in generate_base_implied_equalities_no_const, we could be a lot
+ * smarter here to avoid unnecessary failures in cross-type situations.
+ * For now, use the same left-to-right method used there.
+ */
+ if (new_members)
+ {
+ List *old_members = list_concat(outer_members, inner_members);
+ EquivalenceMember *prev_em = NULL;
+ RestrictInfo *rinfo;
+
+ /* For now, arbitrarily take the first old_member as the one to use */
+ if (old_members)
+ new_members = lappend(new_members, linitial(old_members));
+
+ foreach(lc1, new_members)
+ {
+ EquivalenceMember *cur_em = (EquivalenceMember *) lfirst(lc1);
+
+ if (prev_em != NULL)
+ {
+ Oid eq_op;
+
+ eq_op = select_equality_operator(ec,
+ prev_em->em_datatype,
+ cur_em->em_datatype);
+ if (!OidIsValid(eq_op))
+ {
+ /* failed... */
+ ec->ec_broken = true;
+ return NIL;
+ }
+ /* do NOT set parent_ec, this qual is not redundant! */
+ rinfo = create_join_clause(root, ec, eq_op,
+ prev_em, cur_em,
+ NULL);
+
+ result = lappend(result, rinfo);
+ }
+ prev_em = cur_em;
+ }
+ }
+
+ return result;
+}
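+
+/*
+ * Illustration for generate_join_implied_equalities_normal() above, using
+ * hypothetical tables a, b and c (not drawn from any real example): with
+ * WHERE a.x = b.y AND b.y = c.z the three Vars end up in one EC.  When
+ * joining {a,b} (outer) to {c} (inner), a.x and b.y are outer members and
+ * c.z is the inner member, so a single joinclause such as "a.x = c.z" is
+ * emitted, preferring a pairing of plain Vars whose equality operator is
+ * hashjoinable.  If instead the only qual were "a.x + b.y = c.z", the
+ * expression a.x + b.y is not computable at either input by itself; it
+ * becomes a "new member" at this join and is equated to c.z by the second
+ * half of the function.
+ */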
+
+/*
+ * generate_join_implied_equalities cleanup after failure
+ *
+ * Return any original RestrictInfos that are enforceable at this join.
+ *
+ * In the case of a child inner relation, we have to translate the
+ * original RestrictInfos from parent to child Vars.
+ */
+static List *
+generate_join_implied_equalities_broken(PlannerInfo *root,
+ EquivalenceClass *ec,
+ Relids nominal_join_relids,
+ Relids outer_relids,
+ Relids nominal_inner_relids,
+ RelOptInfo *inner_rel)
+{
+ List *result = NIL;
+ ListCell *lc;
+
+ foreach(lc, ec->ec_sources)
+ {
+ RestrictInfo *restrictinfo = (RestrictInfo *) lfirst(lc);
+ Relids clause_relids = restrictinfo->required_relids;
+
+ if (bms_is_subset(clause_relids, nominal_join_relids) &&
+ !bms_is_subset(clause_relids, outer_relids) &&
+ !bms_is_subset(clause_relids, nominal_inner_relids))
+ result = lappend(result, restrictinfo);
+ }
+
+ /*
+ * If we have to translate, just brute-force apply adjust_appendrel_attrs
+ * to all the RestrictInfos at once. This will result in returning
+ * RestrictInfos that are not listed in ec_derives, but there shouldn't be
+ * any duplication, and it's a sufficiently narrow corner case that we
+ * shouldn't sweat too much over it anyway.
+ *
+ * Since inner_rel might be an indirect descendant of the baserel
+ * mentioned in the ec_sources clauses, we have to be prepared to apply
+ * multiple levels of Var translation.
+ */
+ if (IS_OTHER_REL(inner_rel) && result != NIL)
+ result = (List *) adjust_appendrel_attrs_multilevel(root,
+ (Node *) result,
+ inner_rel->relids,
+ inner_rel->top_parent_relids);
+
+ return result;
+}
+
+
+/*
+ * select_equality_operator
+ * Select a suitable equality operator for comparing two EC members
+ *
+ * Returns InvalidOid if no operator can be found for this datatype combination
+ */
+static Oid
+select_equality_operator(EquivalenceClass *ec, Oid lefttype, Oid righttype)
+{
+ ListCell *lc;
+
+ foreach(lc, ec->ec_opfamilies)
+ {
+ Oid opfamily = lfirst_oid(lc);
+ Oid opno;
+
+ opno = get_opfamily_member(opfamily, lefttype, righttype,
+ BTEqualStrategyNumber);
+ if (!OidIsValid(opno))
+ continue;
+ /* If no barrier quals in query, don't worry about leaky operators */
+ if (ec->ec_max_security == 0)
+ return opno;
+ /* Otherwise, insist that selected operators be leakproof */
+ if (get_func_leakproof(get_opcode(opno)))
+ return opno;
+ }
+ return InvalidOid;
+}
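+
+/*
+ * For example, if one member is an int4 Var and the other an int8 Var, the
+ * integer_ops btree opfamily supplies a cross-type "=" operator for
+ * (int4, int8), so that operator's OID is returned.  When the query
+ * contains security-barrier quals (ec_max_security > 0), a candidate
+ * operator is accepted only if its underlying function is leakproof, as
+ * coded above.
+ */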
+
+
+/*
+ * create_join_clause
+ * Find or make a RestrictInfo comparing the two given EC members
+ * with the given operator.
+ *
+ * parent_ec is either equal to ec (if the clause is a potentially-redundant
+ * join clause) or NULL (if not). We have to treat this as part of the
+ * match requirements --- it's possible that a clause comparing the same two
+ * EMs is a join clause in one join path and a restriction clause in another.
+ */
+static RestrictInfo *
+create_join_clause(PlannerInfo *root,
+ EquivalenceClass *ec, Oid opno,
+ EquivalenceMember *leftem,
+ EquivalenceMember *rightem,
+ EquivalenceClass *parent_ec)
+{
+ RestrictInfo *rinfo;
+ ListCell *lc;
+ MemoryContext oldcontext;
+
+ /*
+ * Search to see if we already built a RestrictInfo for this pair of
+ * EquivalenceMembers. We can use either original source clauses or
+ * previously-derived clauses. The check on opno is probably redundant,
+ * but be safe ...
+ */
+ foreach(lc, ec->ec_sources)
+ {
+ rinfo = (RestrictInfo *) lfirst(lc);
+ if (rinfo->left_em == leftem &&
+ rinfo->right_em == rightem &&
+ rinfo->parent_ec == parent_ec &&
+ opno == ((OpExpr *) rinfo->clause)->opno)
+ return rinfo;
+ }
+
+ foreach(lc, ec->ec_derives)
+ {
+ rinfo = (RestrictInfo *) lfirst(lc);
+ if (rinfo->left_em == leftem &&
+ rinfo->right_em == rightem &&
+ rinfo->parent_ec == parent_ec &&
+ opno == ((OpExpr *) rinfo->clause)->opno)
+ return rinfo;
+ }
+
+ /*
+ * Not there, so build it, in planner context so we can re-use it. (Not
+ * important in normal planning, but definitely so in GEQO.)
+ */
+ oldcontext = MemoryContextSwitchTo(root->planner_cxt);
+
+ rinfo = build_implied_join_equality(root,
+ opno,
+ ec->ec_collation,
+ leftem->em_expr,
+ rightem->em_expr,
+ bms_union(leftem->em_relids,
+ rightem->em_relids),
+ bms_union(leftem->em_nullable_relids,
+ rightem->em_nullable_relids),
+ ec->ec_min_security);
+
+ /* Mark the clause as redundant, or not */
+ rinfo->parent_ec = parent_ec;
+
+ /*
+ * We know the correct values for left_ec/right_ec, ie this particular EC,
+ * so we can just set them directly instead of forcing another lookup.
+ */
+ rinfo->left_ec = ec;
+ rinfo->right_ec = ec;
+
+ /* Mark it as usable with these EMs */
+ rinfo->left_em = leftem;
+ rinfo->right_em = rightem;
+ /* and save it for possible re-use */
+ ec->ec_derives = lappend(ec->ec_derives, rinfo);
+
+ MemoryContextSwitchTo(oldcontext);
+
+ return rinfo;
+}
+
+
+/*
+ * reconsider_outer_join_clauses
+ * Re-examine any outer-join clauses that were set aside by
+ * distribute_qual_to_rels(), and see if we can derive any
+ * EquivalenceClasses from them. Then, if they were not made
+ * redundant, push them out into the regular join-clause lists.
+ *
+ * When we have mergejoinable clauses A = B that are outer-join clauses,
+ * we can't blindly combine them with other clauses A = C to deduce B = C,
+ * since in fact the "equality" A = B won't necessarily hold above the
+ * outer join (one of the variables might be NULL instead). Nonetheless
+ * there are cases where we can add qual clauses using transitivity.
+ *
+ * One case that we look for here is an outer-join clause OUTERVAR = INNERVAR
+ * for which there is also an equivalence clause OUTERVAR = CONSTANT.
+ * It is safe and useful to push a clause INNERVAR = CONSTANT into the
+ * evaluation of the inner (nullable) relation, because any inner rows not
+ * meeting this condition will not contribute to the outer-join result anyway.
+ * (Any outer rows they could join to will be eliminated by the pushed-down
+ * equivalence clause.)
+ *
+ * Note that the above rule does not work for full outer joins; nor is it
+ * very interesting to consider cases where the generated equivalence clause
+ * would involve relations outside the outer join, since such clauses couldn't
+ * be pushed into the inner side's scan anyway. So the restriction to
+ * outervar = pseudoconstant is not really giving up anything.
+ *
+ * For full-join cases, we can only do something useful if it's a FULL JOIN
+ * USING and a merged column has an equivalence MERGEDVAR = CONSTANT.
+ * By the time it gets here, the merged column will look like
+ * COALESCE(LEFTVAR, RIGHTVAR)
+ * and we will have a full-join clause LEFTVAR = RIGHTVAR that we can match
+ * the COALESCE expression to. In this situation we can push LEFTVAR = CONSTANT
+ * and RIGHTVAR = CONSTANT into the input relations, since any rows not
+ * meeting these conditions cannot contribute to the join result.
+ *
+ * Again, there isn't any traction to be gained by trying to deal with
+ * clauses comparing a mergedvar to a non-pseudoconstant. So we can make
+ * use of the EquivalenceClasses to search for matching variables that were
+ * equivalenced to constants. The interesting outer-join clauses were
+ * accumulated for us by distribute_qual_to_rels.
+ *
+ * When we find one of these cases, we implement the changes we want by
+ * generating a new equivalence clause INNERVAR = CONSTANT (or LEFTVAR, etc)
+ * and pushing it into the EquivalenceClass structures. This is because we
+ * may already know that INNERVAR is equivalenced to some other var(s), and
+ * we'd like the constant to propagate to them too. Note that it would be
+ * unsafe to merge any existing EC for INNERVAR with the OUTERVAR's EC ---
+ * that could result in propagating constant restrictions from
+ * INNERVAR to OUTERVAR, which would be very wrong.
+ *
+ * It's possible that the INNERVAR is also an OUTERVAR for some other
+ * outer-join clause, in which case the process can be repeated. So we repeat
+ * looping over the lists of clauses until no further deductions can be made.
+ * Whenever we do make a deduction, we remove the generating clause from the
+ * lists, since we don't want to make the same deduction twice.
+ *
+ * If we don't find any match for a set-aside outer join clause, we must
+ * throw it back into the regular joinclause processing by passing it to
+ * distribute_restrictinfo_to_rels(). If we do generate a derived clause,
+ * however, the outer-join clause is redundant. We still throw it back,
+ * because otherwise the join will be seen as a clauseless join and avoided
+ * during join order searching; but we mark it as redundant to keep from
+ * messing up the joinrel's size estimate. (This behavior means that the
+ * API for this routine is uselessly complex: we could have just put all
+ * the clauses into the regular processing initially. We keep it because
+ * someday we might want to do something else, such as inserting "dummy"
+ * joinclauses instead of real ones.)
+ *
+ * Outer join clauses that are marked outerjoin_delayed are special: this
+ * condition means that one or both VARs might go to null due to a lower
+ * outer join. We can still push a constant through the clause, but only
+ * if its operator is strict; and we *have to* throw the clause back into
+ * regular joinclause processing. By keeping the strict join clause,
+ * we ensure that any null-extended rows that are mistakenly generated due
+ * to suppressing rows not matching the constant will be rejected at the
+ * upper outer join. (This doesn't work for full-join clauses.)
+ */
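+/*
+ * A sketch of the OUTERVAR = INNERVAR case described above, using
+ * hypothetical tables a and b:
+ *
+ *     SELECT ... FROM a LEFT JOIN b ON a.x = b.y WHERE a.x = 42;
+ *
+ * The outer-join clause equates OUTERVAR a.x to INNERVAR b.y, and a.x is
+ * also equated to the constant 42 in an EquivalenceClass.  We can
+ * therefore generate b.y = 42 and push it into b's scan: any b row with
+ * b.y <> 42 could only have joined to a rows that the WHERE clause
+ * discards anyway.  The original join clause is still distributed to the
+ * join, but marked so it doesn't distort the joinrel's size estimate.
+ */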
+void
+reconsider_outer_join_clauses(PlannerInfo *root)
+{
+ bool found;
+ ListCell *cell;
+
+ /* Outer loop repeats until we find no more deductions */
+ do
+ {
+ found = false;
+
+ /* Process the LEFT JOIN clauses */
+ foreach(cell, root->left_join_clauses)
+ {
+ RestrictInfo *rinfo = (RestrictInfo *) lfirst(cell);
+
+ if (reconsider_outer_join_clause(root, rinfo, true))
+ {
+ found = true;
+ /* remove it from the list */
+ root->left_join_clauses =
+ foreach_delete_current(root->left_join_clauses, cell);
+ /* we throw it back anyway (see notes above) */
+ /* but the thrown-back clause has no extra selectivity */
+ rinfo->norm_selec = 2.0;
+ rinfo->outer_selec = 1.0;
+ distribute_restrictinfo_to_rels(root, rinfo);
+ }
+ }
+
+ /* Process the RIGHT JOIN clauses */
+ foreach(cell, root->right_join_clauses)
+ {
+ RestrictInfo *rinfo = (RestrictInfo *) lfirst(cell);
+
+ if (reconsider_outer_join_clause(root, rinfo, false))
+ {
+ found = true;
+ /* remove it from the list */
+ root->right_join_clauses =
+ foreach_delete_current(root->right_join_clauses, cell);
+ /* we throw it back anyway (see notes above) */
+ /* but the thrown-back clause has no extra selectivity */
+ rinfo->norm_selec = 2.0;
+ rinfo->outer_selec = 1.0;
+ distribute_restrictinfo_to_rels(root, rinfo);
+ }
+ }
+
+ /* Process the FULL JOIN clauses */
+ foreach(cell, root->full_join_clauses)
+ {
+ RestrictInfo *rinfo = (RestrictInfo *) lfirst(cell);
+
+ if (reconsider_full_join_clause(root, rinfo))
+ {
+ found = true;
+ /* remove it from the list */
+ root->full_join_clauses =
+ foreach_delete_current(root->full_join_clauses, cell);
+ /* we throw it back anyway (see notes above) */
+ /* but the thrown-back clause has no extra selectivity */
+ rinfo->norm_selec = 2.0;
+ rinfo->outer_selec = 1.0;
+ distribute_restrictinfo_to_rels(root, rinfo);
+ }
+ }
+ } while (found);
+
+ /* Now, any remaining clauses have to be thrown back */
+ foreach(cell, root->left_join_clauses)
+ {
+ RestrictInfo *rinfo = (RestrictInfo *) lfirst(cell);
+
+ distribute_restrictinfo_to_rels(root, rinfo);
+ }
+ foreach(cell, root->right_join_clauses)
+ {
+ RestrictInfo *rinfo = (RestrictInfo *) lfirst(cell);
+
+ distribute_restrictinfo_to_rels(root, rinfo);
+ }
+ foreach(cell, root->full_join_clauses)
+ {
+ RestrictInfo *rinfo = (RestrictInfo *) lfirst(cell);
+
+ distribute_restrictinfo_to_rels(root, rinfo);
+ }
+}
+
+/*
+ * reconsider_outer_join_clauses for a single LEFT/RIGHT JOIN clause
+ *
+ * Returns true if we were able to propagate a constant through the clause.
+ */
+static bool
+reconsider_outer_join_clause(PlannerInfo *root, RestrictInfo *rinfo,
+ bool outer_on_left)
+{
+ Expr *outervar,
+ *innervar;
+ Oid opno,
+ collation,
+ left_type,
+ right_type,
+ inner_datatype;
+ Relids inner_relids,
+ inner_nullable_relids;
+ ListCell *lc1;
+
+ Assert(is_opclause(rinfo->clause));
+ opno = ((OpExpr *) rinfo->clause)->opno;
+ collation = ((OpExpr *) rinfo->clause)->inputcollid;
+
+ /* If clause is outerjoin_delayed, operator must be strict */
+ if (rinfo->outerjoin_delayed && !op_strict(opno))
+ return false;
+
+ /* Extract needed info from the clause */
+ op_input_types(opno, &left_type, &right_type);
+ if (outer_on_left)
+ {
+ outervar = (Expr *) get_leftop(rinfo->clause);
+ innervar = (Expr *) get_rightop(rinfo->clause);
+ inner_datatype = right_type;
+ inner_relids = rinfo->right_relids;
+ }
+ else
+ {
+ outervar = (Expr *) get_rightop(rinfo->clause);
+ innervar = (Expr *) get_leftop(rinfo->clause);
+ inner_datatype = left_type;
+ inner_relids = rinfo->left_relids;
+ }
+ inner_nullable_relids = bms_intersect(inner_relids,
+ rinfo->nullable_relids);
+
+ /* Scan EquivalenceClasses for a match to outervar */
+ foreach(lc1, root->eq_classes)
+ {
+ EquivalenceClass *cur_ec = (EquivalenceClass *) lfirst(lc1);
+ bool match;
+ ListCell *lc2;
+
+ /* Ignore EC unless it contains pseudoconstants */
+ if (!cur_ec->ec_has_const)
+ continue;
+ /* Never match to a volatile EC */
+ if (cur_ec->ec_has_volatile)
+ continue;
+ /* It has to match the outer-join clause as to semantics, too */
+ if (collation != cur_ec->ec_collation)
+ continue;
+ if (!equal(rinfo->mergeopfamilies, cur_ec->ec_opfamilies))
+ continue;
+ /* Does it contain a match to outervar? */
+ match = false;
+ foreach(lc2, cur_ec->ec_members)
+ {
+ EquivalenceMember *cur_em = (EquivalenceMember *) lfirst(lc2);
+
+ Assert(!cur_em->em_is_child); /* no children yet */
+ if (equal(outervar, cur_em->em_expr))
+ {
+ match = true;
+ break;
+ }
+ }
+ if (!match)
+ continue; /* no match, so ignore this EC */
+
+ /*
+ * Yes it does! Try to generate a clause INNERVAR = CONSTANT for each
+ * CONSTANT in the EC. Note that we must succeed with at least one
+ * constant before we can decide to throw away the outer-join clause.
+ */
+ match = false;
+ foreach(lc2, cur_ec->ec_members)
+ {
+ EquivalenceMember *cur_em = (EquivalenceMember *) lfirst(lc2);
+ Oid eq_op;
+ RestrictInfo *newrinfo;
+
+ if (!cur_em->em_is_const)
+ continue; /* ignore non-const members */
+ eq_op = select_equality_operator(cur_ec,
+ inner_datatype,
+ cur_em->em_datatype);
+ if (!OidIsValid(eq_op))
+ continue; /* can't generate equality */
+ newrinfo = build_implied_join_equality(root,
+ eq_op,
+ cur_ec->ec_collation,
+ innervar,
+ cur_em->em_expr,
+ bms_copy(inner_relids),
+ bms_copy(inner_nullable_relids),
+ cur_ec->ec_min_security);
+ if (process_equivalence(root, &newrinfo, true))
+ match = true;
+ }
+
+ /*
+ * If we were able to equate INNERVAR to any constant, report success.
+ * Otherwise, fall out of the search loop, since we know the OUTERVAR
+ * appears in at most one EC.
+ */
+ if (match)
+ return true;
+ else
+ break;
+ }
+
+ return false; /* failed to make any deduction */
+}
+
+/*
+ * reconsider_outer_join_clauses for a single FULL JOIN clause
+ *
+ * Returns true if we were able to propagate a constant through the clause.
+ */
+static bool
+reconsider_full_join_clause(PlannerInfo *root, RestrictInfo *rinfo)
+{
+ Expr *leftvar;
+ Expr *rightvar;
+ Oid opno,
+ collation,
+ left_type,
+ right_type;
+ Relids left_relids,
+ right_relids,
+ left_nullable_relids,
+ right_nullable_relids;
+ ListCell *lc1;
+
+ /* Can't use an outerjoin_delayed clause here */
+ if (rinfo->outerjoin_delayed)
+ return false;
+
+ /* Extract needed info from the clause */
+ Assert(is_opclause(rinfo->clause));
+ opno = ((OpExpr *) rinfo->clause)->opno;
+ collation = ((OpExpr *) rinfo->clause)->inputcollid;
+ op_input_types(opno, &left_type, &right_type);
+ leftvar = (Expr *) get_leftop(rinfo->clause);
+ rightvar = (Expr *) get_rightop(rinfo->clause);
+ left_relids = rinfo->left_relids;
+ right_relids = rinfo->right_relids;
+ left_nullable_relids = bms_intersect(left_relids,
+ rinfo->nullable_relids);
+ right_nullable_relids = bms_intersect(right_relids,
+ rinfo->nullable_relids);
+
+ foreach(lc1, root->eq_classes)
+ {
+ EquivalenceClass *cur_ec = (EquivalenceClass *) lfirst(lc1);
+ EquivalenceMember *coal_em = NULL;
+ bool match;
+ bool matchleft;
+ bool matchright;
+ ListCell *lc2;
+ int coal_idx = -1;
+
+ /* Ignore EC unless it contains pseudoconstants */
+ if (!cur_ec->ec_has_const)
+ continue;
+ /* Never match to a volatile EC */
+ if (cur_ec->ec_has_volatile)
+ continue;
+ /* It has to match the outer-join clause as to semantics, too */
+ if (collation != cur_ec->ec_collation)
+ continue;
+ if (!equal(rinfo->mergeopfamilies, cur_ec->ec_opfamilies))
+ continue;
+
+ /*
+ * Does it contain a COALESCE(leftvar, rightvar) construct?
+ *
+ * We can assume the COALESCE() inputs are in the same order as the
+ * join clause, since both were automatically generated in the cases
+ * we care about.
+ *
+ * XXX currently this may fail to match in cross-type cases because
+ * the COALESCE will contain typecast operations while the join clause
+ * may not (if there is a cross-type mergejoin operator available for
+ * the two column types). Is it OK to strip implicit coercions from
+ * the COALESCE arguments?
+ */
+ match = false;
+ foreach(lc2, cur_ec->ec_members)
+ {
+ coal_em = (EquivalenceMember *) lfirst(lc2);
+ Assert(!coal_em->em_is_child); /* no children yet */
+ if (IsA(coal_em->em_expr, CoalesceExpr))
+ {
+ CoalesceExpr *cexpr = (CoalesceExpr *) coal_em->em_expr;
+ Node *cfirst;
+ Node *csecond;
+
+ if (list_length(cexpr->args) != 2)
+ continue;
+ cfirst = (Node *) linitial(cexpr->args);
+ csecond = (Node *) lsecond(cexpr->args);
+
+ if (equal(leftvar, cfirst) && equal(rightvar, csecond))
+ {
+ coal_idx = foreach_current_index(lc2);
+ match = true;
+ break;
+ }
+ }
+ }
+ if (!match)
+ continue; /* no match, so ignore this EC */
+
+ /*
+ * Yes it does! Try to generate clauses LEFTVAR = CONSTANT and
+ * RIGHTVAR = CONSTANT for each CONSTANT in the EC. Note that we must
+ * succeed with at least one constant for each var before we can
+ * decide to throw away the outer-join clause.
+ */
+ matchleft = matchright = false;
+ foreach(lc2, cur_ec->ec_members)
+ {
+ EquivalenceMember *cur_em = (EquivalenceMember *) lfirst(lc2);
+ Oid eq_op;
+ RestrictInfo *newrinfo;
+
+ if (!cur_em->em_is_const)
+ continue; /* ignore non-const members */
+ eq_op = select_equality_operator(cur_ec,
+ left_type,
+ cur_em->em_datatype);
+ if (OidIsValid(eq_op))
+ {
+ newrinfo = build_implied_join_equality(root,
+ eq_op,
+ cur_ec->ec_collation,
+ leftvar,
+ cur_em->em_expr,
+ bms_copy(left_relids),
+ bms_copy(left_nullable_relids),
+ cur_ec->ec_min_security);
+ if (process_equivalence(root, &newrinfo, true))
+ matchleft = true;
+ }
+ eq_op = select_equality_operator(cur_ec,
+ right_type,
+ cur_em->em_datatype);
+ if (OidIsValid(eq_op))
+ {
+ newrinfo = build_implied_join_equality(root,
+ eq_op,
+ cur_ec->ec_collation,
+ rightvar,
+ cur_em->em_expr,
+ bms_copy(right_relids),
+ bms_copy(right_nullable_relids),
+ cur_ec->ec_min_security);
+ if (process_equivalence(root, &newrinfo, true))
+ matchright = true;
+ }
+ }
+
+ /*
+ * If we were able to equate both vars to constants, we're done, and
+ * we can throw away the full-join clause as redundant. Moreover, we
+ * can remove the COALESCE entry from the EC, since the added
+ * restrictions ensure it will always have the expected value. (We
+ * don't bother trying to update ec_relids or ec_sources.)
+ */
+ if (matchleft && matchright)
+ {
+ cur_ec->ec_members = list_delete_nth_cell(cur_ec->ec_members, coal_idx);
+ return true;
+ }
+
+ /*
+ * Otherwise, fall out of the search loop, since we know the COALESCE
+ * appears in at most one EC (XXX might stop being true if we allow
+ * stripping of coercions above?)
+ */
+ break;
+ }
+
+ return false; /* failed to make any deduction */
+}
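+
+/*
+ * A sketch of the FULL JOIN USING case handled by
+ * reconsider_full_join_clause() above, using hypothetical tables a and b
+ * that both have a column x:
+ *
+ *     SELECT * FROM a FULL JOIN b USING (x) WHERE x = 42;
+ *
+ * The merged column x is really COALESCE(a.x, b.x), which ends up in an
+ * EquivalenceClass together with the constant 42, while the full-join
+ * clause itself is a.x = b.x.  Once the COALESCE member is matched to the
+ * clause's arguments, we can derive a.x = 42 and b.x = 42, push them into
+ * the two scans, and delete the COALESCE member from the EC.
+ */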
+
+
+/*
+ * exprs_known_equal
+ * Detect whether two expressions are known equal due to equivalence
+ * relationships.
+ *
+ * Actually, this only shows that the expressions are equal according
+ * to some opfamily's notion of equality --- but we only use it for
+ * selectivity estimation, so a fuzzy idea of equality is OK.
+ *
+ * Note: does not bother to check for "equal(item1, item2)"; caller must
+ * check that case if it's possible to pass identical items.
+ */
+bool
+exprs_known_equal(PlannerInfo *root, Node *item1, Node *item2)
+{
+ ListCell *lc1;
+
+ foreach(lc1, root->eq_classes)
+ {
+ EquivalenceClass *ec = (EquivalenceClass *) lfirst(lc1);
+ bool item1member = false;
+ bool item2member = false;
+ ListCell *lc2;
+
+ /* Never match to a volatile EC */
+ if (ec->ec_has_volatile)
+ continue;
+
+ foreach(lc2, ec->ec_members)
+ {
+ EquivalenceMember *em = (EquivalenceMember *) lfirst(lc2);
+
+ if (em->em_is_child)
+ continue; /* ignore children here */
+ if (equal(item1, em->em_expr))
+ item1member = true;
+ else if (equal(item2, em->em_expr))
+ item2member = true;
+ /* Exit as soon as equality is proven */
+ if (item1member && item2member)
+ return true;
+ }
+ }
+ return false;
+}
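+
+/*
+ * For instance (hypothetical tables), WHERE a.x = b.y AND b.y = c.z puts
+ * a.x, b.y and c.z into one EquivalenceClass, so exprs_known_equal()
+ * reports a.x and c.z as equal even though no a.x = c.z qual was written.
+ * Since the result is used only for selectivity estimation, this
+ * opfamily-based notion of equality is good enough.
+ */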
+
+
+/*
+ * match_eclasses_to_foreign_key_col
+ * See whether a foreign key column match is proven by any eclass.
+ *
+ * If the referenced and referencing Vars of the fkey's colno'th column are
+ * known equal due to any eclass, return that eclass; otherwise return NULL.
+ * (In principle there might be more than one matching eclass if multiple
+ * collations are involved, but since collation doesn't matter for equality,
+ * we ignore that fine point here.) This is much like exprs_known_equal,
+ * except that we insist on the comparison operator matching the eclass, so
+ * that the result is definite not approximate.
+ *
+ * On success, we also set fkinfo->eclass[colno] to the matching eclass,
+ * and set fkinfo->fk_eclass_member[colno] to the eclass member for the
+ * referencing Var.
+ */
+EquivalenceClass *
+match_eclasses_to_foreign_key_col(PlannerInfo *root,
+ ForeignKeyOptInfo *fkinfo,
+ int colno)
+{
+ Index var1varno = fkinfo->con_relid;
+ AttrNumber var1attno = fkinfo->conkey[colno];
+ Index var2varno = fkinfo->ref_relid;
+ AttrNumber var2attno = fkinfo->confkey[colno];
+ Oid eqop = fkinfo->conpfeqop[colno];
+ RelOptInfo *rel1 = root->simple_rel_array[var1varno];
+ RelOptInfo *rel2 = root->simple_rel_array[var2varno];
+ List *opfamilies = NIL; /* compute only if needed */
+ Bitmapset *matching_ecs;
+ int i;
+
+ /* Consider only eclasses mentioning both relations */
+ Assert(root->ec_merging_done);
+ Assert(IS_SIMPLE_REL(rel1));
+ Assert(IS_SIMPLE_REL(rel2));
+ matching_ecs = bms_intersect(rel1->eclass_indexes,
+ rel2->eclass_indexes);
+
+ i = -1;
+ while ((i = bms_next_member(matching_ecs, i)) >= 0)
+ {
+ EquivalenceClass *ec = (EquivalenceClass *) list_nth(root->eq_classes,
+ i);
+ EquivalenceMember *item1_em = NULL;
+ EquivalenceMember *item2_em = NULL;
+ ListCell *lc2;
+
+ /* Never match to a volatile EC */
+ if (ec->ec_has_volatile)
+ continue;
+ /* Note: it seems okay to match to "broken" eclasses here */
+
+ foreach(lc2, ec->ec_members)
+ {
+ EquivalenceMember *em = (EquivalenceMember *) lfirst(lc2);
+ Var *var;
+
+ if (em->em_is_child)
+ continue; /* ignore children here */
+
+ /* EM must be a Var, possibly with RelabelType */
+ var = (Var *) em->em_expr;
+ while (var && IsA(var, RelabelType))
+ var = (Var *) ((RelabelType *) var)->arg;
+ if (!(var && IsA(var, Var)))
+ continue;
+
+ /* Match? */
+ if (var->varno == var1varno && var->varattno == var1attno)
+ item1_em = em;
+ else if (var->varno == var2varno && var->varattno == var2attno)
+ item2_em = em;
+
+ /* Have we found both PK and FK column in this EC? */
+ if (item1_em && item2_em)
+ {
+ /*
+ * Succeed if eqop matches EC's opfamilies. We could test
+ * this before scanning the members, but it's probably cheaper
+ * to test for member matches first.
+ */
+ if (opfamilies == NIL) /* compute if we didn't already */
+ opfamilies = get_mergejoin_opfamilies(eqop);
+ if (equal(opfamilies, ec->ec_opfamilies))
+ {
+ fkinfo->eclass[colno] = ec;
+ fkinfo->fk_eclass_member[colno] = item2_em;
+ return ec;
+ }
+ /* Otherwise, done with this EC, move on to the next */
+ break;
+ }
+ }
+ }
+ return NULL;
+}
+
+/*
+ * find_derived_clause_for_ec_member
+ * Search for a previously-derived clause mentioning the given EM.
+ *
+ * The eclass should be an ec_has_const EC, of which the EM is a non-const
+ * member. This should ensure there is just one derived clause mentioning
+ * the EM (and equating it to a constant).
+ * Returns NULL if no such clause can be found.
+ */
+RestrictInfo *
+find_derived_clause_for_ec_member(EquivalenceClass *ec,
+ EquivalenceMember *em)
+{
+ ListCell *lc;
+
+ Assert(ec->ec_has_const);
+ Assert(!em->em_is_const);
+ foreach(lc, ec->ec_derives)
+ {
+ RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
+
+ /*
+ * generate_base_implied_equalities_const will have put non-const
+ * members on the left side of derived clauses.
+ */
+ if (rinfo->left_em == em)
+ return rinfo;
+ }
+ return NULL;
+}
+
+
+/*
+ * add_child_rel_equivalences
+ * Search for EC members that reference the root parent of child_rel, and
+ * add transformed members referencing the child_rel.
+ *
+ * Note that this function won't be called at all unless we have at least some
+ * reason to believe that the EC members it generates will be useful.
+ *
+ * parent_rel and child_rel could be derived from appinfo, but since the
+ * caller has already computed them, we might as well just pass them in.
+ *
+ * The passed-in AppendRelInfo is not used when the parent_rel is not a
+ * top-level baserel, since it shows the mapping from the parent_rel but
+ * we need to translate EC expressions that refer to the top-level parent.
+ * Using it is faster than using adjust_appendrel_attrs_multilevel(), though,
+ * so we prefer it when we can.
+ */
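+/*
+ * For example (hypothetical partitioned table p with partitions p1 and
+ * p2): given "SELECT ... FROM p JOIN q ON p.x = q.y", the EC {p.x, q.y}
+ * initially references only the parent rel p.  This function adds
+ * translated members p1.x and p2.x, marked em_is_child, so that later
+ * planning steps (for instance, building sort pathkeys or parameterized
+ * paths for the individual partitions) can recognize those child
+ * expressions as equivalent to q.y.
+ */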
+void
+add_child_rel_equivalences(PlannerInfo *root,
+ AppendRelInfo *appinfo,
+ RelOptInfo *parent_rel,
+ RelOptInfo *child_rel)
+{
+ Relids top_parent_relids = child_rel->top_parent_relids;
+ Relids child_relids = child_rel->relids;
+ int i;
+
+ /*
+ * EC merging should be complete already, so we can use the parent rel's
+ * eclass_indexes to avoid searching all of root->eq_classes.
+ */
+ Assert(root->ec_merging_done);
+ Assert(IS_SIMPLE_REL(parent_rel));
+
+ i = -1;
+ while ((i = bms_next_member(parent_rel->eclass_indexes, i)) >= 0)
+ {
+ EquivalenceClass *cur_ec = (EquivalenceClass *) list_nth(root->eq_classes, i);
+ int num_members;
+
+ /*
+ * If this EC contains a volatile expression, then generating child
+ * EMs would be downright dangerous, so skip it. We rely on a
+ * volatile EC having only one EM.
+ */
+ if (cur_ec->ec_has_volatile)
+ continue;
+
+ /* Sanity check eclass_indexes only contain ECs for parent_rel */
+ Assert(bms_is_subset(top_parent_relids, cur_ec->ec_relids));
+
+ /*
+ * We don't use foreach() here because there's no point in scanning
+ * newly-added child members, so we can stop after the last
+ * pre-existing EC member.
+ */
+ num_members = list_length(cur_ec->ec_members);
+ for (int pos = 0; pos < num_members; pos++)
+ {
+ EquivalenceMember *cur_em = (EquivalenceMember *) list_nth(cur_ec->ec_members, pos);
+
+ if (cur_em->em_is_const)
+ continue; /* ignore consts here */
+
+ /*
+ * We consider only original EC members here, not
+ * already-transformed child members. Otherwise, if some original
+ * member expression references more than one appendrel, we'd get
+ * an O(N^2) explosion of useless derived expressions for
+ * combinations of children. (But add_child_join_rel_equivalences
+ * may add targeted combinations for partitionwise-join purposes.)
+ */
+ if (cur_em->em_is_child)
+ continue; /* ignore children here */
+
+ /* Does this member reference child's topmost parent rel? */
+ if (bms_overlap(cur_em->em_relids, top_parent_relids))
+ {
+ /* Yes, generate transformed child version */
+ Expr *child_expr;
+ Relids new_relids;
+ Relids new_nullable_relids;
+
+ if (parent_rel->reloptkind == RELOPT_BASEREL)
+ {
+ /* Simple single-level transformation */
+ child_expr = (Expr *)
+ adjust_appendrel_attrs(root,
+ (Node *) cur_em->em_expr,
+ 1, &appinfo);
+ }
+ else
+ {
+ /* Must do multi-level transformation */
+ child_expr = (Expr *)
+ adjust_appendrel_attrs_multilevel(root,
+ (Node *) cur_em->em_expr,
+ child_relids,
+ top_parent_relids);
+ }
+
+ /*
+ * Transform em_relids to match. Note we do *not* do
+ * pull_varnos(child_expr) here, as for example the
+ * transformation might have substituted a constant, but we
+ * don't want the child member to be marked as constant.
+ */
+ new_relids = bms_difference(cur_em->em_relids,
+ top_parent_relids);
+ new_relids = bms_add_members(new_relids, child_relids);
+
+ /*
+ * And likewise for nullable_relids. Note this code assumes
+ * parent and child relids are singletons.
+ */
+ new_nullable_relids = cur_em->em_nullable_relids;
+ if (bms_overlap(new_nullable_relids, top_parent_relids))
+ {
+ new_nullable_relids = bms_difference(new_nullable_relids,
+ top_parent_relids);
+ new_nullable_relids = bms_add_members(new_nullable_relids,
+ child_relids);
+ }
+
+ (void) add_eq_member(cur_ec, child_expr,
+ new_relids, new_nullable_relids,
+ true, cur_em->em_datatype);
+
+ /* Record this EC index for the child rel */
+ child_rel->eclass_indexes = bms_add_member(child_rel->eclass_indexes, i);
+ }
+ }
+ }
+}
+
+/*
+ * add_child_join_rel_equivalences
+ * Like add_child_rel_equivalences(), but for joinrels
+ *
+ * Here we find the ECs relevant to the top parent joinrel and add transformed
+ * member expressions that refer to this child joinrel.
+ *
+ * Note that this function won't be called at all unless we have at least some
+ * reason to believe that the EC members it generates will be useful.
+ */
+void
+add_child_join_rel_equivalences(PlannerInfo *root,
+ int nappinfos, AppendRelInfo **appinfos,
+ RelOptInfo *parent_joinrel,
+ RelOptInfo *child_joinrel)
+{
+ Relids top_parent_relids = child_joinrel->top_parent_relids;
+ Relids child_relids = child_joinrel->relids;
+ Bitmapset *matching_ecs;
+ MemoryContext oldcontext;
+ int i;
+
+ Assert(IS_JOIN_REL(child_joinrel) && IS_JOIN_REL(parent_joinrel));
+
+ /* We need consider only ECs that mention the parent joinrel */
+ matching_ecs = get_eclass_indexes_for_relids(root, top_parent_relids);
+
+ /*
+ * If we're being called during GEQO join planning, we still have to
+ * create any new EC members in the main planner context, to avoid having
+ * a corrupt EC data structure after the GEQO context is reset. This is
+ * problematic since we'll leak memory across repeated GEQO cycles. For
+ * now, though, bloat is better than crash. If it becomes a real issue
+ * we'll have to do something to avoid generating duplicate EC members.
+ */
+ oldcontext = MemoryContextSwitchTo(root->planner_cxt);
+
+ i = -1;
+ while ((i = bms_next_member(matching_ecs, i)) >= 0)
+ {
+ EquivalenceClass *cur_ec = (EquivalenceClass *) list_nth(root->eq_classes, i);
+ int num_members;
+
+ /*
+ * If this EC contains a volatile expression, then generating child
+ * EMs would be downright dangerous, so skip it. We rely on a
+ * volatile EC having only one EM.
+ */
+ if (cur_ec->ec_has_volatile)
+ continue;
+
+ /* Sanity check on get_eclass_indexes_for_relids result */
+ Assert(bms_overlap(top_parent_relids, cur_ec->ec_relids));
+
+ /*
+ * We don't use foreach() here because there's no point in scanning
+ * newly-added child members, so we can stop after the last
+ * pre-existing EC member.
+ */
+ num_members = list_length(cur_ec->ec_members);
+ for (int pos = 0; pos < num_members; pos++)
+ {
+ EquivalenceMember *cur_em = (EquivalenceMember *) list_nth(cur_ec->ec_members, pos);
+
+ if (cur_em->em_is_const)
+ continue; /* ignore consts here */
+
+ /*
+ * We consider only original EC members here, not
+ * already-transformed child members.
+ */
+ if (cur_em->em_is_child)
+ continue; /* ignore children here */
+
+ /*
+ * We may ignore expressions that reference a single baserel,
+ * because add_child_rel_equivalences should have handled them.
+ */
+ if (bms_membership(cur_em->em_relids) != BMS_MULTIPLE)
+ continue;
+
+ /* Does this member reference child's topmost parent rel? */
+ if (bms_overlap(cur_em->em_relids, top_parent_relids))
+ {
+ /* Yes, generate transformed child version */
+ Expr *child_expr;
+ Relids new_relids;
+ Relids new_nullable_relids;
+
+ if (parent_joinrel->reloptkind == RELOPT_JOINREL)
+ {
+ /* Simple single-level transformation */
+ child_expr = (Expr *)
+ adjust_appendrel_attrs(root,
+ (Node *) cur_em->em_expr,
+ nappinfos, appinfos);
+ }
+ else
+ {
+ /* Must do multi-level transformation */
+ Assert(parent_joinrel->reloptkind == RELOPT_OTHER_JOINREL);
+ child_expr = (Expr *)
+ adjust_appendrel_attrs_multilevel(root,
+ (Node *) cur_em->em_expr,
+ child_relids,
+ top_parent_relids);
+ }
+
+ /*
+ * Transform em_relids to match. Note we do *not* do
+ * pull_varnos(child_expr) here, as for example the
+ * transformation might have substituted a constant, but we
+ * don't want the child member to be marked as constant.
+ */
+ new_relids = bms_difference(cur_em->em_relids,
+ top_parent_relids);
+ new_relids = bms_add_members(new_relids, child_relids);
+
+ /*
+ * For nullable_relids, we must selectively replace parent
+ * nullable relids with child ones.
+ */
+ new_nullable_relids = cur_em->em_nullable_relids;
+ if (bms_overlap(new_nullable_relids, top_parent_relids))
+ new_nullable_relids =
+ adjust_child_relids_multilevel(root,
+ new_nullable_relids,
+ child_relids,
+ top_parent_relids);
+
+ (void) add_eq_member(cur_ec, child_expr,
+ new_relids, new_nullable_relids,
+ true, cur_em->em_datatype);
+ }
+ }
+ }
+
+ MemoryContextSwitchTo(oldcontext);
+}
+
+
+/*
+ * generate_implied_equalities_for_column
+ * Create EC-derived joinclauses usable with a specific column.
+ *
+ * This is used by indxpath.c to extract potentially indexable joinclauses
+ * from ECs, and can be used by foreign data wrappers for similar purposes.
+ * We assume that only expressions in Vars of a single table are of interest,
+ * but the caller provides a callback function to identify exactly which
+ * such expressions it would like to know about.
+ *
+ * We assume that any given table/index column could appear in only one EC.
+ * (This should be true in all but the most pathological cases, and if it
+ * isn't, we stop on the first match anyway.) Therefore, what we return
+ * is a redundant list of clauses equating the table/index column to each of
+ * the other-relation values it is known to be equal to. Any one of
+ * these clauses can be used to create a parameterized path, and there
+ * is no value in using more than one. (But it *is* worthwhile to create
+ * a separate parameterized path for each one, since that leads to different
+ * join orders.)
+ *
+ * The caller can pass a Relids set of rels we aren't interested in joining
+ * to, so as to save the work of creating useless clauses.
+ */
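+/*
+ * For example (hypothetical tables): with an index on a.x and an
+ * EquivalenceClass {a.x, b.y, c.z}, a callback that accepts a.x makes this
+ * function return the clauses "a.x = b.y" and "a.x = c.z".  Either clause
+ * alone suffices to parameterize an index scan on a, but creating one
+ * parameterized path per clause lets different join orders be explored.
+ */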
+List *
+generate_implied_equalities_for_column(PlannerInfo *root,
+ RelOptInfo *rel,
+ ec_matches_callback_type callback,
+ void *callback_arg,
+ Relids prohibited_rels)
+{
+ List *result = NIL;
+ bool is_child_rel = (rel->reloptkind == RELOPT_OTHER_MEMBER_REL);
+ Relids parent_relids;
+ int i;
+
+ /* Should be OK to rely on eclass_indexes */
+ Assert(root->ec_merging_done);
+
+ /* Indexes are available only on base or "other" member relations. */
+ Assert(IS_SIMPLE_REL(rel));
+
+ /* If it's a child rel, we'll need to know what its parent(s) are */
+ if (is_child_rel)
+ parent_relids = find_childrel_parents(root, rel);
+ else
+ parent_relids = NULL; /* not used, but keep compiler quiet */
+
+ i = -1;
+ while ((i = bms_next_member(rel->eclass_indexes, i)) >= 0)
+ {
+ EquivalenceClass *cur_ec = (EquivalenceClass *) list_nth(root->eq_classes, i);
+ EquivalenceMember *cur_em;
+ ListCell *lc2;
+
+ /* Sanity check eclass_indexes only contain ECs for rel */
+ Assert(is_child_rel || bms_is_subset(rel->relids, cur_ec->ec_relids));
+
+ /*
+ * Won't generate joinclauses if const or single-member (the latter
+ * test covers the volatile case too)
+ */
+ if (cur_ec->ec_has_const || list_length(cur_ec->ec_members) <= 1)
+ continue;
+
+ /*
+ * Scan members, looking for a match to the target column. Note that
+ * child EC members are considered, but only when they belong to the
+ * target relation. (Unlike regular members, the same expression
+ * could be a child member of more than one EC. Therefore, it's
+ * potentially order-dependent which EC a child relation's target
+ * column gets matched to. This is annoying but it only happens in
+ * corner cases, so for now we live with just reporting the first
+ * match. See also get_eclass_for_sort_expr.)
+ */
+ cur_em = NULL;
+ foreach(lc2, cur_ec->ec_members)
+ {
+ cur_em = (EquivalenceMember *) lfirst(lc2);
+ if (bms_equal(cur_em->em_relids, rel->relids) &&
+ callback(root, rel, cur_ec, cur_em, callback_arg))
+ break;
+ cur_em = NULL;
+ }
+
+ if (!cur_em)
+ continue;
+
+ /*
+ * Found our match. Scan the other EC members and attempt to generate
+ * joinclauses.
+ */
+ foreach(lc2, cur_ec->ec_members)
+ {
+ EquivalenceMember *other_em = (EquivalenceMember *) lfirst(lc2);
+ Oid eq_op;
+ RestrictInfo *rinfo;
+
+ if (other_em->em_is_child)
+ continue; /* ignore children here */
+
+ /* Make sure it'll be a join to a different rel */
+ if (other_em == cur_em ||
+ bms_overlap(other_em->em_relids, rel->relids))
+ continue;
+
+ /* Forget it if caller doesn't want joins to this rel */
+ if (bms_overlap(other_em->em_relids, prohibited_rels))
+ continue;
+
+ /*
+ * Also, if this is a child rel, avoid generating a useless join
+ * to its parent rel(s).
+ */
+ if (is_child_rel &&
+ bms_overlap(parent_relids, other_em->em_relids))
+ continue;
+
+ eq_op = select_equality_operator(cur_ec,
+ cur_em->em_datatype,
+ other_em->em_datatype);
+ if (!OidIsValid(eq_op))
+ continue;
+
+ /* set parent_ec to mark as redundant with other joinclauses */
+ rinfo = create_join_clause(root, cur_ec, eq_op,
+ cur_em, other_em,
+ cur_ec);
+
+ result = lappend(result, rinfo);
+ }
+
+ /*
+ * If somehow we failed to create any join clauses, we might as well
+ * keep scanning the ECs for another match. But if we did make any,
+ * we're done, because we don't want to return non-redundant clauses.
+ */
+ if (result)
+ break;
+ }
+
+ return result;
+}
+
+/*
+ * have_relevant_eclass_joinclause
+ * Detect whether there is an EquivalenceClass that could produce
+ * a joinclause involving the two given relations.
+ *
+ * This is essentially a very cut-down version of
+ * generate_join_implied_equalities(). Note it's OK to occasionally say "yes"
+ * incorrectly. Hence we don't bother with details like whether the lack of a
+ * cross-type operator might prevent the clause from actually being generated.
+ */
+bool
+have_relevant_eclass_joinclause(PlannerInfo *root,
+ RelOptInfo *rel1, RelOptInfo *rel2)
+{
+ Bitmapset *matching_ecs;
+ int i;
+
+ /* Examine only eclasses mentioning both rel1 and rel2 */
+ matching_ecs = get_common_eclass_indexes(root, rel1->relids,
+ rel2->relids);
+
+ i = -1;
+ while ((i = bms_next_member(matching_ecs, i)) >= 0)
+ {
+ EquivalenceClass *ec = (EquivalenceClass *) list_nth(root->eq_classes,
+ i);
+
+ /*
+ * Sanity check that get_common_eclass_indexes gave only ECs
+ * containing both rels.
+ */
+ Assert(bms_overlap(rel1->relids, ec->ec_relids));
+ Assert(bms_overlap(rel2->relids, ec->ec_relids));
+
+ /*
+ * Won't generate joinclauses if single-member (this test covers the
+ * volatile case too)
+ */
+ if (list_length(ec->ec_members) <= 1)
+ continue;
+
+ /*
+ * We do not need to examine the individual members of the EC, because
+ * all that we care about is whether each rel overlaps the relids of
+ * at least one member, and get_common_eclass_indexes() and the single
+ * member check above are sufficient to prove that. (As with
+ * have_relevant_joinclause(), it is not necessary that the EC be able
+ * to form a joinclause relating exactly the two given rels, only that
+ * it be able to form a joinclause mentioning both, and this will
+ * surely be true if both of them overlap ec_relids.)
+ *
+ * Note we don't test ec_broken; if we did, we'd need a separate code
+ * path to look through ec_sources. Checking the membership anyway is
+ * OK as a possibly-overoptimistic heuristic.
+ *
+ * We don't test ec_has_const either, even though a const eclass won't
+ * generate real join clauses. This is because if we had "WHERE a.x =
+ * b.y and a.x = 42", it is worth considering a join between a and b,
+ * since the join result is likely to be small even though it'll end
+ * up being an unqualified nestloop.
+ */
+
+ return true;
+ }
+
+ return false;
+}
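+
+/*
+ * For example (hypothetical tables): with quals a.x = c.z AND b.y = c.z
+ * there is no qual written directly between a and b, but the EC
+ * {a.x, b.y, c.z} mentions both rels, so this function returns true and
+ * the join of a to b is not dismissed as clauseless during join-order
+ * search.
+ */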
+
+
+/*
+ * has_relevant_eclass_joinclause
+ * Detect whether there is an EquivalenceClass that could produce
+ * a joinclause involving the given relation and anything else.
+ *
+ * This is the same as have_relevant_eclass_joinclause with the other rel
+ * implicitly defined as "everything else in the query".
+ */
+bool
+has_relevant_eclass_joinclause(PlannerInfo *root, RelOptInfo *rel1)
+{
+ Bitmapset *matched_ecs;
+ int i;
+
+ /* Examine only eclasses mentioning rel1 */
+ matched_ecs = get_eclass_indexes_for_relids(root, rel1->relids);
+
+ i = -1;
+ while ((i = bms_next_member(matched_ecs, i)) >= 0)
+ {
+ EquivalenceClass *ec = (EquivalenceClass *) list_nth(root->eq_classes,
+ i);
+
+ /*
+ * Won't generate joinclauses if single-member (this test covers the
+ * volatile case too)
+ */
+ if (list_length(ec->ec_members) <= 1)
+ continue;
+
+ /*
+ * Per the comment in have_relevant_eclass_joinclause, it's sufficient
+ * to find an EC that mentions both this rel and some other rel.
+ */
+ if (!bms_is_subset(ec->ec_relids, rel1->relids))
+ return true;
+ }
+
+ return false;
+}
+
+
+/*
+ * eclass_useful_for_merging
+ * Detect whether the EC could produce any mergejoinable join clauses
+ * against the specified relation.
+ *
+ * This is just a heuristic test and doesn't have to be exact; it's better
+ * to say "yes" incorrectly than "no". Hence we don't bother with details
+ * like whether the lack of a cross-type operator might prevent the clause
+ * from actually being generated.
+ */
+bool
+eclass_useful_for_merging(PlannerInfo *root,
+ EquivalenceClass *eclass,
+ RelOptInfo *rel)
+{
+ Relids relids;
+ ListCell *lc;
+
+ Assert(!eclass->ec_merged);
+
+ /*
+ * Won't generate joinclauses if const or single-member (the latter test
+ * covers the volatile case too)
+ */
+ if (eclass->ec_has_const || list_length(eclass->ec_members) <= 1)
+ return false;
+
+ /*
+ * Note we don't test ec_broken; if we did, we'd need a separate code path
+ * to look through ec_sources. Checking the members anyway is OK as a
+ * possibly-overoptimistic heuristic.
+ */
+
+ /* If specified rel is a child, we must consider the topmost parent rel */
+ if (IS_OTHER_REL(rel))
+ {
+ Assert(!bms_is_empty(rel->top_parent_relids));
+ relids = rel->top_parent_relids;
+ }
+ else
+ relids = rel->relids;
+
+ /* If rel already includes all members of eclass, no point in searching */
+ if (bms_is_subset(eclass->ec_relids, relids))
+ return false;
+
+ /* To join, we need a member not in the given rel */
+ foreach(lc, eclass->ec_members)
+ {
+ EquivalenceMember *cur_em = (EquivalenceMember *) lfirst(lc);
+
+ if (cur_em->em_is_child)
+ continue; /* ignore children here */
+
+ if (!bms_overlap(cur_em->em_relids, relids))
+ return true;
+ }
+
+ return false;
+}
+
+
+/*
+ * is_redundant_derived_clause
+ * Test whether rinfo is derived from same EC as any clause in clauselist;
+ * if so, it can be presumed to represent a condition that's redundant
+ * with that member of the list.
+ */
+bool
+is_redundant_derived_clause(RestrictInfo *rinfo, List *clauselist)
+{
+ EquivalenceClass *parent_ec = rinfo->parent_ec;
+ ListCell *lc;
+
+ /* Fail if it's not a potentially-redundant clause from some EC */
+ if (parent_ec == NULL)
+ return false;
+
+ foreach(lc, clauselist)
+ {
+ RestrictInfo *otherrinfo = (RestrictInfo *) lfirst(lc);
+
+ if (otherrinfo->parent_ec == parent_ec)
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * is_redundant_with_indexclauses
+ * Test whether rinfo is redundant with any clause in the IndexClause
+ * list. Here, for convenience, we test both simple identity and
+ * whether it is derived from the same EC as any member of the list.
+ */
+bool
+is_redundant_with_indexclauses(RestrictInfo *rinfo, List *indexclauses)
+{
+ EquivalenceClass *parent_ec = rinfo->parent_ec;
+ ListCell *lc;
+
+ foreach(lc, indexclauses)
+ {
+ IndexClause *iclause = lfirst_node(IndexClause, lc);
+ RestrictInfo *otherrinfo = iclause->rinfo;
+
+ /* If indexclause is lossy, it won't enforce the condition exactly */
+ if (iclause->lossy)
+ continue;
+
+ /* Match if it's same clause (pointer equality should be enough) */
+ if (rinfo == otherrinfo)
+ return true;
+ /* Match if derived from same EC */
+ if (parent_ec && otherrinfo->parent_ec == parent_ec)
+ return true;
+
+ /*
+ * No need to look at the derived clauses in iclause->indexquals; they
+ * couldn't match if the parent clause didn't.
+ */
+ }
+
+ return false;
+}
+
+/*
+ * get_eclass_indexes_for_relids
+ * Build and return a Bitmapset containing the indexes into root's
+ * eq_classes list for all eclasses that mention any of these relids
+ */
+static Bitmapset *
+get_eclass_indexes_for_relids(PlannerInfo *root, Relids relids)
+{
+ Bitmapset *ec_indexes = NULL;
+ int i = -1;
+
+ /* Should be OK to rely on eclass_indexes */
+ Assert(root->ec_merging_done);
+
+ while ((i = bms_next_member(relids, i)) > 0)
+ {
+ RelOptInfo *rel = root->simple_rel_array[i];
+
+ ec_indexes = bms_add_members(ec_indexes, rel->eclass_indexes);
+ }
+ return ec_indexes;
+}
+
+/*
+ * get_common_eclass_indexes
+ * Build and return a Bitmapset containing the indexes into root's
+ * eq_classes list for all eclasses that mention rels in both
+ * relids1 and relids2.
+ */
+static Bitmapset *
+get_common_eclass_indexes(PlannerInfo *root, Relids relids1, Relids relids2)
+{
+ Bitmapset *rel1ecs;
+ Bitmapset *rel2ecs;
+ int relid;
+
+ rel1ecs = get_eclass_indexes_for_relids(root, relids1);
+
+ /*
+ * We can get away with just using the relation's eclass_indexes directly
+ * when relids2 is a singleton set.
+ */
+ if (bms_get_singleton_member(relids2, &relid))
+ rel2ecs = root->simple_rel_array[relid]->eclass_indexes;
+ else
+ rel2ecs = get_eclass_indexes_for_relids(root, relids2);
+
+ /* Calculate and return the common EC indexes, recycling the left input. */
+ return bms_int_members(rel1ecs, rel2ecs);
+}
diff --git a/src/backend/optimizer/path/indxpath.c b/src/backend/optimizer/path/indxpath.c
new file mode 100644
index 0000000..3800f0c
--- /dev/null
+++ b/src/backend/optimizer/path/indxpath.c
@@ -0,0 +1,3817 @@
+/*-------------------------------------------------------------------------
+ *
+ * indxpath.c
+ * Routines to determine which indexes are usable for scanning a
+ * given relation, and create Paths accordingly.
+ *
+ * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ * src/backend/optimizer/path/indxpath.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include <math.h>
+
+#include "access/stratnum.h"
+#include "access/sysattr.h"
+#include "catalog/pg_am.h"
+#include "catalog/pg_operator.h"
+#include "catalog/pg_opfamily.h"
+#include "catalog/pg_type.h"
+#include "nodes/makefuncs.h"
+#include "nodes/nodeFuncs.h"
+#include "nodes/supportnodes.h"
+#include "optimizer/cost.h"
+#include "optimizer/optimizer.h"
+#include "optimizer/pathnode.h"
+#include "optimizer/paths.h"
+#include "optimizer/prep.h"
+#include "optimizer/restrictinfo.h"
+#include "utils/lsyscache.h"
+#include "utils/selfuncs.h"
+
+
+/* XXX see PartCollMatchesExprColl */
+#define IndexCollMatchesExprColl(idxcollation, exprcollation) \
+ ((idxcollation) == InvalidOid || (idxcollation) == (exprcollation))
+
+/* Whether we are looking for plain indexscan, bitmap scan, or either */
+typedef enum
+{
+ ST_INDEXSCAN, /* must support amgettuple */
+ ST_BITMAPSCAN, /* must support amgetbitmap */
+ ST_ANYSCAN /* either is okay */
+} ScanTypeControl;
+
+/* Data structure for collecting qual clauses that match an index */
+typedef struct
+{
+ bool nonempty; /* True if lists are not all empty */
+ /* Lists of IndexClause nodes, one list per index column */
+ List *indexclauses[INDEX_MAX_KEYS];
+} IndexClauseSet;
+
+/* Per-path data used within choose_bitmap_and() */
+typedef struct
+{
+ Path *path; /* IndexPath, BitmapAndPath, or BitmapOrPath */
+ List *quals; /* the WHERE clauses it uses */
+ List *preds; /* predicates of its partial index(es) */
+ Bitmapset *clauseids; /* quals+preds represented as a bitmapset */
+ bool unclassifiable; /* has too many quals+preds to process? */
+} PathClauseUsage;
+
+/* Callback argument for ec_member_matches_indexcol */
+typedef struct
+{
+ IndexOptInfo *index; /* index we're considering */
+ int indexcol; /* index column we want to match to */
+} ec_member_matches_arg;
+
+
+static void consider_index_join_clauses(PlannerInfo *root, RelOptInfo *rel,
+ IndexOptInfo *index,
+ IndexClauseSet *rclauseset,
+ IndexClauseSet *jclauseset,
+ IndexClauseSet *eclauseset,
+ List **bitindexpaths);
+static void consider_index_join_outer_rels(PlannerInfo *root, RelOptInfo *rel,
+ IndexOptInfo *index,
+ IndexClauseSet *rclauseset,
+ IndexClauseSet *jclauseset,
+ IndexClauseSet *eclauseset,
+ List **bitindexpaths,
+ List *indexjoinclauses,
+ int considered_clauses,
+ List **considered_relids);
+static void get_join_index_paths(PlannerInfo *root, RelOptInfo *rel,
+ IndexOptInfo *index,
+ IndexClauseSet *rclauseset,
+ IndexClauseSet *jclauseset,
+ IndexClauseSet *eclauseset,
+ List **bitindexpaths,
+ Relids relids,
+ List **considered_relids);
+static bool eclass_already_used(EquivalenceClass *parent_ec, Relids oldrelids,
+ List *indexjoinclauses);
+static bool bms_equal_any(Relids relids, List *relids_list);
+static void get_index_paths(PlannerInfo *root, RelOptInfo *rel,
+ IndexOptInfo *index, IndexClauseSet *clauses,
+ List **bitindexpaths);
+static List *build_index_paths(PlannerInfo *root, RelOptInfo *rel,
+ IndexOptInfo *index, IndexClauseSet *clauses,
+ bool useful_predicate,
+ ScanTypeControl scantype,
+ bool *skip_nonnative_saop,
+ bool *skip_lower_saop);
+static List *build_paths_for_OR(PlannerInfo *root, RelOptInfo *rel,
+ List *clauses, List *other_clauses);
+static List *generate_bitmap_or_paths(PlannerInfo *root, RelOptInfo *rel,
+ List *clauses, List *other_clauses);
+static Path *choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel,
+ List *paths);
+static int path_usage_comparator(const void *a, const void *b);
+static Cost bitmap_scan_cost_est(PlannerInfo *root, RelOptInfo *rel,
+ Path *ipath);
+static Cost bitmap_and_cost_est(PlannerInfo *root, RelOptInfo *rel,
+ List *paths);
+static PathClauseUsage *classify_index_clause_usage(Path *path,
+ List **clauselist);
+static void find_indexpath_quals(Path *bitmapqual, List **quals, List **preds);
+static int find_list_position(Node *node, List **nodelist);
+static bool check_index_only(RelOptInfo *rel, IndexOptInfo *index);
+static double get_loop_count(PlannerInfo *root, Index cur_relid, Relids outer_relids);
+static double adjust_rowcount_for_semijoins(PlannerInfo *root,
+ Index cur_relid,
+ Index outer_relid,
+ double rowcount);
+static double approximate_joinrel_size(PlannerInfo *root, Relids relids);
+static void match_restriction_clauses_to_index(PlannerInfo *root,
+ IndexOptInfo *index,
+ IndexClauseSet *clauseset);
+static void match_join_clauses_to_index(PlannerInfo *root,
+ RelOptInfo *rel, IndexOptInfo *index,
+ IndexClauseSet *clauseset,
+ List **joinorclauses);
+static void match_eclass_clauses_to_index(PlannerInfo *root,
+ IndexOptInfo *index,
+ IndexClauseSet *clauseset);
+static void match_clauses_to_index(PlannerInfo *root,
+ List *clauses,
+ IndexOptInfo *index,
+ IndexClauseSet *clauseset);
+static void match_clause_to_index(PlannerInfo *root,
+ RestrictInfo *rinfo,
+ IndexOptInfo *index,
+ IndexClauseSet *clauseset);
+static IndexClause *match_clause_to_indexcol(PlannerInfo *root,
+ RestrictInfo *rinfo,
+ int indexcol,
+ IndexOptInfo *index);
+static IndexClause *match_boolean_index_clause(PlannerInfo *root,
+ RestrictInfo *rinfo,
+ int indexcol, IndexOptInfo *index);
+static IndexClause *match_opclause_to_indexcol(PlannerInfo *root,
+ RestrictInfo *rinfo,
+ int indexcol,
+ IndexOptInfo *index);
+static IndexClause *match_funcclause_to_indexcol(PlannerInfo *root,
+ RestrictInfo *rinfo,
+ int indexcol,
+ IndexOptInfo *index);
+static IndexClause *get_index_clause_from_support(PlannerInfo *root,
+ RestrictInfo *rinfo,
+ Oid funcid,
+ int indexarg,
+ int indexcol,
+ IndexOptInfo *index);
+static IndexClause *match_saopclause_to_indexcol(PlannerInfo *root,
+ RestrictInfo *rinfo,
+ int indexcol,
+ IndexOptInfo *index);
+static IndexClause *match_rowcompare_to_indexcol(PlannerInfo *root,
+ RestrictInfo *rinfo,
+ int indexcol,
+ IndexOptInfo *index);
+static IndexClause *expand_indexqual_rowcompare(PlannerInfo *root,
+ RestrictInfo *rinfo,
+ int indexcol,
+ IndexOptInfo *index,
+ Oid expr_op,
+ bool var_on_left);
+static void match_pathkeys_to_index(IndexOptInfo *index, List *pathkeys,
+ List **orderby_clauses_p,
+ List **clause_columns_p);
+static Expr *match_clause_to_ordering_op(IndexOptInfo *index,
+ int indexcol, Expr *clause, Oid pk_opfamily);
+static bool ec_member_matches_indexcol(PlannerInfo *root, RelOptInfo *rel,
+ EquivalenceClass *ec, EquivalenceMember *em,
+ void *arg);
+
+
+/*
+ * create_index_paths()
+ * Generate all interesting index paths for the given relation.
+ * Candidate paths are added to the rel's pathlist (using add_path).
+ *
+ * To be considered for an index scan, an index must match one or more
+ * restriction clauses or join clauses from the query's qual condition,
+ * or match the query's ORDER BY condition, or have a predicate that
+ * matches the query's qual condition.
+ *
+ * There are two basic kinds of index scans. A "plain" index scan uses
+ * only restriction clauses (possibly none at all) in its indexqual,
+ * so it can be applied in any context. A "parameterized" index scan uses
+ * join clauses (plus restriction clauses, if available) in its indexqual.
+ * When joining such a scan to one of the relations supplying the other
+ * variables used in its indexqual, the parameterized scan must appear as
+ * the inner relation of a nestloop join; it can't be used on the outer side,
+ * nor in a merge or hash join. In that context, values for the other rels'
+ * attributes are available and fixed during any one scan of the indexpath.
+ *
+ * An IndexPath is generated and submitted to add_path() for each plain or
+ * parameterized index scan this routine deems potentially interesting for
+ * the current query.
+ *
+ * 'rel' is the relation for which we want to generate index paths
+ *
+ * Note: check_index_predicates() must have been run previously for this rel.
+ *
+ * Note: in cases involving LATERAL references in the relation's tlist, it's
+ * possible that rel->lateral_relids is nonempty. Currently, we include
+ * lateral_relids into the parameterization reported for each path, but don't
+ * take it into account otherwise. The fact that any such rels *must* be
+ * available as parameter sources perhaps should influence our choices of
+ * index quals ... but for now, it doesn't seem worth troubling over.
+ * In particular, comments below about "unparameterized" paths should be read
+ * as meaning "unparameterized so far as the indexquals are concerned".
+ */
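+/*
+ * Illustrative example (hypothetical tables and columns): for
+ *    SELECT * FROM big b JOIN small s ON b.x = s.id WHERE b.y = 10
+ * an index on big(y) matches only the restriction clause and yields a plain,
+ * unparameterized path usable in any join context, while an index on big(x)
+ * matches only the join clause and yields a path parameterized by "s", usable
+ * only as the inner side of a nestloop with s supplying the outer values.
+ */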
+void
+create_index_paths(PlannerInfo *root, RelOptInfo *rel)
+{
+ List *indexpaths;
+ List *bitindexpaths;
+ List *bitjoinpaths;
+ List *joinorclauses;
+ IndexClauseSet rclauseset;
+ IndexClauseSet jclauseset;
+ IndexClauseSet eclauseset;
+ ListCell *lc;
+
+ /* Skip the whole mess if no indexes */
+ if (rel->indexlist == NIL)
+ return;
+
+ /* Bitmap paths are collected and then dealt with at the end */
+ bitindexpaths = bitjoinpaths = joinorclauses = NIL;
+
+ /* Examine each index in turn */
+ foreach(lc, rel->indexlist)
+ {
+ IndexOptInfo *index = (IndexOptInfo *) lfirst(lc);
+
+ /* Protect limited-size array in IndexClauseSets */
+ Assert(index->nkeycolumns <= INDEX_MAX_KEYS);
+
+ /*
+ * Ignore partial indexes that do not match the query.
+ * (generate_bitmap_or_paths() might be able to do something with
+ * them, but that's of no concern here.)
+ */
+ if (index->indpred != NIL && !index->predOK)
+ continue;
+
+ /*
+ * Identify the restriction clauses that can match the index.
+ */
+ MemSet(&rclauseset, 0, sizeof(rclauseset));
+ match_restriction_clauses_to_index(root, index, &rclauseset);
+
+ /*
+ * Build index paths from the restriction clauses. These will be
+ * non-parameterized paths. Plain paths go directly to add_path();
+ * bitmap paths are added to bitindexpaths to be handled below.
+ */
+ get_index_paths(root, rel, index, &rclauseset,
+ &bitindexpaths);
+
+ /*
+ * Identify the join clauses that can match the index. For the moment
+ * we keep them separate from the restriction clauses. Note that this
+ * step finds only "loose" join clauses that have not been merged into
+ * EquivalenceClasses. Also, collect join OR clauses for later.
+ */
+ MemSet(&jclauseset, 0, sizeof(jclauseset));
+ match_join_clauses_to_index(root, rel, index,
+ &jclauseset, &joinorclauses);
+
+ /*
+ * Look for EquivalenceClasses that can generate joinclauses matching
+ * the index.
+ */
+ MemSet(&eclauseset, 0, sizeof(eclauseset));
+ match_eclass_clauses_to_index(root, index,
+ &eclauseset);
+
+ /*
+ * If we found any plain or eclass join clauses, build parameterized
+ * index paths using them.
+ */
+ if (jclauseset.nonempty || eclauseset.nonempty)
+ consider_index_join_clauses(root, rel, index,
+ &rclauseset,
+ &jclauseset,
+ &eclauseset,
+ &bitjoinpaths);
+ }
+
+ /*
+ * Generate BitmapOrPaths for any suitable OR-clauses present in the
+ * restriction list. Add these to bitindexpaths.
+ */
+ indexpaths = generate_bitmap_or_paths(root, rel,
+ rel->baserestrictinfo, NIL);
+ bitindexpaths = list_concat(bitindexpaths, indexpaths);
+
+ /*
+ * Likewise, generate BitmapOrPaths for any suitable OR-clauses present in
+ * the joinclause list. Add these to bitjoinpaths.
+ */
+ indexpaths = generate_bitmap_or_paths(root, rel,
+ joinorclauses, rel->baserestrictinfo);
+ bitjoinpaths = list_concat(bitjoinpaths, indexpaths);
+
+ /*
+ * If we found anything usable, generate a BitmapHeapPath for the most
+ * promising combination of restriction bitmap index paths. Note there
+ * will be only one such path no matter how many indexes exist. This
+ * should be sufficient since there's basically only one figure of merit
+ * (total cost) for such a path.
+ */
+ if (bitindexpaths != NIL)
+ {
+ Path *bitmapqual;
+ BitmapHeapPath *bpath;
+
+ bitmapqual = choose_bitmap_and(root, rel, bitindexpaths);
+ bpath = create_bitmap_heap_path(root, rel, bitmapqual,
+ rel->lateral_relids, 1.0, 0);
+ add_path(rel, (Path *) bpath);
+
+ /* create a partial bitmap heap path */
+ if (rel->consider_parallel && rel->lateral_relids == NULL)
+ create_partial_bitmap_paths(root, rel, bitmapqual);
+ }
+
+ /*
+ * Likewise, if we found anything usable, generate BitmapHeapPaths for the
+ * most promising combinations of join bitmap index paths. Our strategy
+ * is to generate one such path for each distinct parameterization seen
+ * among the available bitmap index paths. This may look pretty
+ * expensive, but usually there won't be very many distinct
+ * parameterizations. (This logic is quite similar to that in
+ * consider_index_join_clauses, but we're working with whole paths not
+ * individual clauses.)
+ */
+ if (bitjoinpaths != NIL)
+ {
+ List *all_path_outers;
+ ListCell *lc;
+
+ /* Identify each distinct parameterization seen in bitjoinpaths */
+ all_path_outers = NIL;
+ foreach(lc, bitjoinpaths)
+ {
+ Path *path = (Path *) lfirst(lc);
+ Relids required_outer = PATH_REQ_OUTER(path);
+
+ if (!bms_equal_any(required_outer, all_path_outers))
+ all_path_outers = lappend(all_path_outers, required_outer);
+ }
+
+ /* Now, for each distinct parameterization set ... */
+ foreach(lc, all_path_outers)
+ {
+ Relids max_outers = (Relids) lfirst(lc);
+ List *this_path_set;
+ Path *bitmapqual;
+ Relids required_outer;
+ double loop_count;
+ BitmapHeapPath *bpath;
+ ListCell *lcp;
+
+ /* Identify all the bitmap join paths needing no more than that */
+ this_path_set = NIL;
+ foreach(lcp, bitjoinpaths)
+ {
+ Path *path = (Path *) lfirst(lcp);
+
+ if (bms_is_subset(PATH_REQ_OUTER(path), max_outers))
+ this_path_set = lappend(this_path_set, path);
+ }
+
+ /*
+ * Add in restriction bitmap paths, since they can be used
+ * together with any join paths.
+ */
+ this_path_set = list_concat(this_path_set, bitindexpaths);
+
+ /* Select best AND combination for this parameterization */
+ bitmapqual = choose_bitmap_and(root, rel, this_path_set);
+
+ /* And push that path into the mix */
+ required_outer = PATH_REQ_OUTER(bitmapqual);
+ loop_count = get_loop_count(root, rel->relid, required_outer);
+ bpath = create_bitmap_heap_path(root, rel, bitmapqual,
+ required_outer, loop_count, 0);
+ add_path(rel, (Path *) bpath);
+ }
+ }
+}
+
+/*
+ * consider_index_join_clauses
+ * Given sets of join clauses for an index, decide which parameterized
+ * index paths to build.
+ *
+ * Plain indexpaths are sent directly to add_path, while potential
+ * bitmap indexpaths are added to *bitindexpaths for later processing.
+ *
+ * 'rel' is the index's heap relation
+ * 'index' is the index for which we want to generate paths
+ * 'rclauseset' is the collection of indexable restriction clauses
+ * 'jclauseset' is the collection of indexable simple join clauses
+ * 'eclauseset' is the collection of indexable clauses from EquivalenceClasses
+ * '*bitindexpaths' is the list to add bitmap paths to
+ */
+static void
+consider_index_join_clauses(PlannerInfo *root, RelOptInfo *rel,
+ IndexOptInfo *index,
+ IndexClauseSet *rclauseset,
+ IndexClauseSet *jclauseset,
+ IndexClauseSet *eclauseset,
+ List **bitindexpaths)
+{
+ int considered_clauses = 0;
+ List *considered_relids = NIL;
+ int indexcol;
+
+ /*
+ * The strategy here is to identify every potentially useful set of outer
+ * rels that can provide indexable join clauses. For each such set,
+ * select all the join clauses available from those outer rels, add on all
+ * the indexable restriction clauses, and generate plain and/or bitmap
+ * index paths for that set of clauses. This is based on the assumption
+ * that it's always better to apply a clause as an indexqual than as a
+ * filter (qpqual); which is where an available clause would end up being
+ * applied if we omit it from the indexquals.
+ *
+ * This looks expensive, but in most practical cases there won't be very
+ * many distinct sets of outer rels to consider. As a safety valve when
+ * that's not true, we use a heuristic: limit the number of outer rel sets
+ * considered to a multiple of the number of clauses considered. (We'll
+ * always consider using each individual join clause, though.)
+ *
+ * For simplicity in selecting relevant clauses, we represent each set of
+ * outer rels as a maximum set of clause_relids --- that is, the indexed
+ * relation itself is also included in the relids set. considered_relids
+ * lists all relids sets we've already tried.
+ */
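+ /*
+ * For instance (hypothetical relids), if the indexed relation is rel 1 and
+ * the clause lists contain join clauses referencing outer rels 2 and 3, the
+ * relids sets tried below are {1,2}, {1,3}, and, via the union step in
+ * consider_index_join_outer_rels, {1,2,3}; each distinct set results in one
+ * call of get_join_index_paths.
+ */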
+ for (indexcol = 0; indexcol < index->nkeycolumns; indexcol++)
+ {
+ /* Consider each applicable simple join clause */
+ considered_clauses += list_length(jclauseset->indexclauses[indexcol]);
+ consider_index_join_outer_rels(root, rel, index,
+ rclauseset, jclauseset, eclauseset,
+ bitindexpaths,
+ jclauseset->indexclauses[indexcol],
+ considered_clauses,
+ &considered_relids);
+ /* Consider each applicable eclass join clause */
+ considered_clauses += list_length(eclauseset->indexclauses[indexcol]);
+ consider_index_join_outer_rels(root, rel, index,
+ rclauseset, jclauseset, eclauseset,
+ bitindexpaths,
+ eclauseset->indexclauses[indexcol],
+ considered_clauses,
+ &considered_relids);
+ }
+}
+
+/*
+ * consider_index_join_outer_rels
+ * Generate parameterized paths based on clause relids in the clause list.
+ *
+ * Workhorse for consider_index_join_clauses; see notes therein for rationale.
+ *
+ * 'rel', 'index', 'rclauseset', 'jclauseset', 'eclauseset', and
+ * 'bitindexpaths' as above
+ * 'indexjoinclauses' is a list of IndexClauses for join clauses
+ * 'considered_clauses' is the total number of clauses considered (so far)
+ * '*considered_relids' is a list of all relids sets already considered
+ */
+static void
+consider_index_join_outer_rels(PlannerInfo *root, RelOptInfo *rel,
+ IndexOptInfo *index,
+ IndexClauseSet *rclauseset,
+ IndexClauseSet *jclauseset,
+ IndexClauseSet *eclauseset,
+ List **bitindexpaths,
+ List *indexjoinclauses,
+ int considered_clauses,
+ List **considered_relids)
+{
+ ListCell *lc;
+
+ /* Examine relids of each joinclause in the given list */
+ foreach(lc, indexjoinclauses)
+ {
+ IndexClause *iclause = (IndexClause *) lfirst(lc);
+ Relids clause_relids = iclause->rinfo->clause_relids;
+ EquivalenceClass *parent_ec = iclause->rinfo->parent_ec;
+ int num_considered_relids;
+
+ /* If we already tried its relids set, no need to do so again */
+ if (bms_equal_any(clause_relids, *considered_relids))
+ continue;
+
+ /*
+ * Generate the union of this clause's relids set with each
+ * previously-tried set. This ensures we try this clause along with
+ * every interesting subset of previous clauses. However, to avoid
+ * exponential growth of planning time when there are many clauses,
+ * limit the number of relid sets accepted to 10 * considered_clauses.
+ *
+ * Note: get_join_index_paths appends entries to *considered_relids,
+ * but we do not need to visit such newly-added entries within this
+ * loop, so we don't use foreach() here. No real harm would be done
+ * if we did visit them, since the subset check would reject them; but
+ * it would waste some cycles.
+ */
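+ /*
+ * For instance (hypothetical relids), with clause_relids = {1,2} and an
+ * already-tried set {1,3}, the loop below proposes the union {1,2,3} as
+ * well, so long as the 10 * considered_clauses cap has not been reached.
+ */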
+ num_considered_relids = list_length(*considered_relids);
+ for (int pos = 0; pos < num_considered_relids; pos++)
+ {
+ Relids oldrelids = (Relids) list_nth(*considered_relids, pos);
+
+ /*
+ * If either is a subset of the other, no new set is possible.
+ * This isn't a complete test for redundancy, but it's easy and
+ * cheap. get_join_index_paths will check more carefully if we
+ * already generated the same relids set.
+ */
+ if (bms_subset_compare(clause_relids, oldrelids) != BMS_DIFFERENT)
+ continue;
+
+ /*
+ * If this clause was derived from an equivalence class, the
+ * clause list may contain other clauses derived from the same
+ * eclass. We should not consider that combining this clause with
+ * one of those clauses generates a usefully different
+ * parameterization; so skip if any clause derived from the same
+ * eclass would already have been included when using oldrelids.
+ */
+ if (parent_ec &&
+ eclass_already_used(parent_ec, oldrelids,
+ indexjoinclauses))
+ continue;
+
+ /*
+ * If the number of relid sets considered exceeds our heuristic
+ * limit, stop considering combinations of clauses. We'll still
+ * consider the current clause alone, though (below this loop).
+ */
+ if (list_length(*considered_relids) >= 10 * considered_clauses)
+ break;
+
+ /* OK, try the union set */
+ get_join_index_paths(root, rel, index,
+ rclauseset, jclauseset, eclauseset,
+ bitindexpaths,
+ bms_union(clause_relids, oldrelids),
+ considered_relids);
+ }
+
+ /* Also try this set of relids by itself */
+ get_join_index_paths(root, rel, index,
+ rclauseset, jclauseset, eclauseset,
+ bitindexpaths,
+ clause_relids,
+ considered_relids);
+ }
+}
+
+/*
+ * get_join_index_paths
+ * Generate index paths using clauses from the specified outer relations.
+ * In addition to generating paths, relids is added to *considered_relids
+ * if not already present.
+ *
+ * Workhorse for consider_index_join_clauses; see notes therein for rationale.
+ *
+ * 'rel', 'index', 'rclauseset', 'jclauseset', 'eclauseset',
+ * 'bitindexpaths', 'considered_relids' as above
+ * 'relids' is the current set of relids to consider (the target rel plus
+ * one or more outer rels)
+ */
+static void
+get_join_index_paths(PlannerInfo *root, RelOptInfo *rel,
+ IndexOptInfo *index,
+ IndexClauseSet *rclauseset,
+ IndexClauseSet *jclauseset,
+ IndexClauseSet *eclauseset,
+ List **bitindexpaths,
+ Relids relids,
+ List **considered_relids)
+{
+ IndexClauseSet clauseset;
+ int indexcol;
+
+ /* If we already considered this relids set, don't repeat the work */
+ if (bms_equal_any(relids, *considered_relids))
+ return;
+
+ /* Identify indexclauses usable with this relids set */
+ MemSet(&clauseset, 0, sizeof(clauseset));
+
+ for (indexcol = 0; indexcol < index->nkeycolumns; indexcol++)
+ {
+ ListCell *lc;
+
+ /* First find applicable simple join clauses */
+ foreach(lc, jclauseset->indexclauses[indexcol])
+ {
+ IndexClause *iclause = (IndexClause *) lfirst(lc);
+
+ if (bms_is_subset(iclause->rinfo->clause_relids, relids))
+ clauseset.indexclauses[indexcol] =
+ lappend(clauseset.indexclauses[indexcol], iclause);
+ }
+
+ /*
+ * Add applicable eclass join clauses. The clauses generated for each
+ * column are redundant (cf generate_implied_equalities_for_column),
+ * so we need at most one. This is the only exception to the general
+ * rule of using all available index clauses.
+ */
+ foreach(lc, eclauseset->indexclauses[indexcol])
+ {
+ IndexClause *iclause = (IndexClause *) lfirst(lc);
+
+ if (bms_is_subset(iclause->rinfo->clause_relids, relids))
+ {
+ clauseset.indexclauses[indexcol] =
+ lappend(clauseset.indexclauses[indexcol], iclause);
+ break;
+ }
+ }
+
+ /* Add restriction clauses */
+ clauseset.indexclauses[indexcol] =
+ list_concat(clauseset.indexclauses[indexcol],
+ rclauseset->indexclauses[indexcol]);
+
+ if (clauseset.indexclauses[indexcol] != NIL)
+ clauseset.nonempty = true;
+ }
+
+ /* We should have found something, else caller passed silly relids */
+ Assert(clauseset.nonempty);
+
+ /* Build index path(s) using the collected set of clauses */
+ get_index_paths(root, rel, index, &clauseset, bitindexpaths);
+
+ /*
+ * Remember we considered paths for this set of relids.
+ */
+ *considered_relids = lappend(*considered_relids, relids);
+}
+
+/*
+ * eclass_already_used
+ * True if any join clause usable with oldrelids was generated from
+ * the specified equivalence class.
+ */
+static bool
+eclass_already_used(EquivalenceClass *parent_ec, Relids oldrelids,
+ List *indexjoinclauses)
+{
+ ListCell *lc;
+
+ foreach(lc, indexjoinclauses)
+ {
+ IndexClause *iclause = (IndexClause *) lfirst(lc);
+ RestrictInfo *rinfo = iclause->rinfo;
+
+ if (rinfo->parent_ec == parent_ec &&
+ bms_is_subset(rinfo->clause_relids, oldrelids))
+ return true;
+ }
+ return false;
+}
+
+/*
+ * bms_equal_any
+ * True if relids is bms_equal to any member of relids_list
+ *
+ * Perhaps this should be in bitmapset.c someday.
+ */
+static bool
+bms_equal_any(Relids relids, List *relids_list)
+{
+ ListCell *lc;
+
+ foreach(lc, relids_list)
+ {
+ if (bms_equal(relids, (Relids) lfirst(lc)))
+ return true;
+ }
+ return false;
+}
+
+
+/*
+ * get_index_paths
+ * Given an index and a set of index clauses for it, construct IndexPaths.
+ *
+ * Plain indexpaths are sent directly to add_path, while potential
+ * bitmap indexpaths are added to *bitindexpaths for later processing.
+ *
+ * This is a fairly simple frontend to build_index_paths(). Its reason for
+ * existence is mainly to handle ScalarArrayOpExpr quals properly. If the
+ * index AM supports them natively, we should just include them in simple
+ * index paths. If not, we should exclude them while building simple index
+ * paths, and then make a separate attempt to include them in bitmap paths.
+ * Furthermore, we should consider excluding lower-order ScalarArrayOpExpr
+ * quals so as to create ordered paths.
+ */
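+/*
+ * For example (hypothetical column names): with a two-column btree index on
+ * (a, b), the clauses "a = 5 AND b = ANY ('{1,2,3}')" can both become
+ * indexquals, since btree sets amsearcharray; the first build_index_paths
+ * call skips the lower-order ScalarArrayOpExpr on b so that pass keeps the
+ * index ordering, and the second call adds it back, yielding a more selective
+ * but unordered path. With an AM lacking amsearcharray, the SAOP clause is
+ * instead handled by the bitmap-only paths built when skip_nonnative_saop
+ * gets set.
+ */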
+static void
+get_index_paths(PlannerInfo *root, RelOptInfo *rel,
+ IndexOptInfo *index, IndexClauseSet *clauses,
+ List **bitindexpaths)
+{
+ List *indexpaths;
+ bool skip_nonnative_saop = false;
+ bool skip_lower_saop = false;
+ ListCell *lc;
+
+ /*
+ * Build simple index paths using the clauses. Allow ScalarArrayOpExpr
+ * clauses only if the index AM supports them natively, and skip any such
+ * clauses for index columns after the first (so that we produce ordered
+ * paths if possible).
+ */
+ indexpaths = build_index_paths(root, rel,
+ index, clauses,
+ index->predOK,
+ ST_ANYSCAN,
+ &skip_nonnative_saop,
+ &skip_lower_saop);
+
+ /*
+ * If we skipped any lower-order ScalarArrayOpExprs on an index with an AM
+ * that supports them, then try again including those clauses. This will
+ * produce paths with more selectivity but no ordering.
+ */
+ if (skip_lower_saop)
+ {
+ indexpaths = list_concat(indexpaths,
+ build_index_paths(root, rel,
+ index, clauses,
+ index->predOK,
+ ST_ANYSCAN,
+ &skip_nonnative_saop,
+ NULL));
+ }
+
+ /*
+ * Submit all the ones that can form plain IndexScan plans to add_path. (A
+ * plain IndexPath can represent either a plain IndexScan or an
+ * IndexOnlyScan, but for our purposes here that distinction does not
+ * matter. However, some of the indexes might support only bitmap scans,
+ * and those we mustn't submit to add_path here.)
+ *
+ * Also, pick out the ones that are usable as bitmap scans. For that, we
+ * must discard indexes that don't support bitmap scans, and we also are
+ * only interested in paths that have some selectivity; we should discard
+ * anything that was generated solely for ordering purposes.
+ */
+ foreach(lc, indexpaths)
+ {
+ IndexPath *ipath = (IndexPath *) lfirst(lc);
+
+ if (index->amhasgettuple)
+ add_path(rel, (Path *) ipath);
+
+ if (index->amhasgetbitmap &&
+ (ipath->path.pathkeys == NIL ||
+ ipath->indexselectivity < 1.0))
+ *bitindexpaths = lappend(*bitindexpaths, ipath);
+ }
+
+ /*
+ * If there were ScalarArrayOpExpr clauses that the index can't handle
+ * natively, generate bitmap scan paths relying on executor-managed
+ * ScalarArrayOpExpr.
+ */
+ if (skip_nonnative_saop)
+ {
+ indexpaths = build_index_paths(root, rel,
+ index, clauses,
+ false,
+ ST_BITMAPSCAN,
+ NULL,
+ NULL);
+ *bitindexpaths = list_concat(*bitindexpaths, indexpaths);
+ }
+}
+
+/*
+ * build_index_paths
+ * Given an index and a set of index clauses for it, construct zero
+ * or more IndexPaths. It also constructs zero or more partial IndexPaths.
+ *
+ * We return a list of paths because (1) this routine checks some cases
+ * that should cause us to not generate any IndexPath, and (2) in some
+ * cases we want to consider both a forward and a backward scan, so as
+ * to obtain both sort orders. Note that the paths are just returned
+ * to the caller and not immediately fed to add_path().
+ *
+ * At top level, useful_predicate should be exactly the index's predOK flag
+ * (ie, true if it has a predicate that was proven from the restriction
+ * clauses). When working on an arm of an OR clause, useful_predicate
+ * should be true if the predicate required the current OR list to be proven.
+ * Note that this routine should never be called at all if the index has an
+ * unprovable predicate.
+ *
+ * scantype indicates whether we want to create plain indexscans, bitmap
+ * indexscans, or both. When it's ST_BITMAPSCAN, we will not consider
+ * index ordering while deciding if a Path is worth generating.
+ *
+ * If skip_nonnative_saop is non-NULL, we ignore ScalarArrayOpExpr clauses
+ * unless the index AM supports them directly, and we set *skip_nonnative_saop
+ * to true if we found any such clauses (caller must initialize the variable
+ * to false). If it's NULL, we do not ignore ScalarArrayOpExpr clauses.
+ *
+ * If skip_lower_saop is non-NULL, we ignore ScalarArrayOpExpr clauses for
+ * non-first index columns, and we set *skip_lower_saop to true if we found
+ * any such clauses (caller must initialize the variable to false). If it's
+ * NULL, we do not ignore non-first ScalarArrayOpExpr clauses, but they will
+ * result in considering the scan's output to be unordered.
+ *
+ * 'rel' is the index's heap relation
+ * 'index' is the index for which we want to generate paths
+ * 'clauses' is the collection of indexable clauses (IndexClause nodes)
+ * 'useful_predicate' indicates whether the index has a useful predicate
+ * 'scantype' indicates whether we need plain or bitmap scan support
+ * 'skip_nonnative_saop' indicates whether to accept SAOP if index AM doesn't
+ * 'skip_lower_saop' indicates whether to accept non-first-column SAOP
+ */
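+/*
+ * For example (hypothetical query): with an ordered index on t(a) and a query
+ * requesting ORDER BY a DESC, truncate_useless_pathkeys discards the
+ * forward-scan pathkeys, but step 5 below finds the BackwardScanDirection
+ * pathkeys useful, so a backward-scan IndexPath is generated.
+ */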
+static List *
+build_index_paths(PlannerInfo *root, RelOptInfo *rel,
+ IndexOptInfo *index, IndexClauseSet *clauses,
+ bool useful_predicate,
+ ScanTypeControl scantype,
+ bool *skip_nonnative_saop,
+ bool *skip_lower_saop)
+{
+ List *result = NIL;
+ IndexPath *ipath;
+ List *index_clauses;
+ Relids outer_relids;
+ double loop_count;
+ List *orderbyclauses;
+ List *orderbyclausecols;
+ List *index_pathkeys;
+ List *useful_pathkeys;
+ bool found_lower_saop_clause;
+ bool pathkeys_possibly_useful;
+ bool index_is_ordered;
+ bool index_only_scan;
+ int indexcol;
+
+ /*
+ * Check that index supports the desired scan type(s)
+ */
+ switch (scantype)
+ {
+ case ST_INDEXSCAN:
+ if (!index->amhasgettuple)
+ return NIL;
+ break;
+ case ST_BITMAPSCAN:
+ if (!index->amhasgetbitmap)
+ return NIL;
+ break;
+ case ST_ANYSCAN:
+ /* either or both are OK */
+ break;
+ }
+
+ /*
+ * 1. Combine the per-column IndexClause lists into an overall list.
+ *
+ * In the resulting list, clauses are ordered by index key, so that the
+ * column numbers form a nondecreasing sequence. (This order is depended
+ * on by btree and possibly other places.) The list can be empty, if the
+ * index AM allows that.
+ *
+ * found_lower_saop_clause is set true if we accept a ScalarArrayOpExpr
+ * index clause for a non-first index column. This prevents us from
+ * assuming that the scan result is ordered. (Actually, the result is
+ * still ordered if there are equality constraints for all earlier
+ * columns, but it seems too expensive and non-modular for this code to be
+ * aware of that refinement.)
+ *
+ * We also build a Relids set showing which outer rels are required by the
+ * selected clauses. Any lateral_relids are included in that, but not
+ * otherwise accounted for.
+ */
+ index_clauses = NIL;
+ found_lower_saop_clause = false;
+ outer_relids = bms_copy(rel->lateral_relids);
+ for (indexcol = 0; indexcol < index->nkeycolumns; indexcol++)
+ {
+ ListCell *lc;
+
+ foreach(lc, clauses->indexclauses[indexcol])
+ {
+ IndexClause *iclause = (IndexClause *) lfirst(lc);
+ RestrictInfo *rinfo = iclause->rinfo;
+
+ /* We might need to omit ScalarArrayOpExpr clauses */
+ if (IsA(rinfo->clause, ScalarArrayOpExpr))
+ {
+ if (!index->amsearcharray)
+ {
+ if (skip_nonnative_saop)
+ {
+ /* Ignore because not supported by index */
+ *skip_nonnative_saop = true;
+ continue;
+ }
+ /* Caller had better intend this only for bitmap scan */
+ Assert(scantype == ST_BITMAPSCAN);
+ }
+ if (indexcol > 0)
+ {
+ if (skip_lower_saop)
+ {
+ /* Caller doesn't want to lose index ordering */
+ *skip_lower_saop = true;
+ continue;
+ }
+ found_lower_saop_clause = true;
+ }
+ }
+
+ /* OK to include this clause */
+ index_clauses = lappend(index_clauses, iclause);
+ outer_relids = bms_add_members(outer_relids,
+ rinfo->clause_relids);
+ }
+
+ /*
+ * If no clauses match the first index column, check for amoptionalkey
+ * restriction. We can't generate a scan over an index with
+ * amoptionalkey = false unless there's at least one index clause.
+ * (When working on columns after the first, this test cannot fail. It
+ * is always okay for columns after the first to not have any
+ * clauses.)
+ */
+ if (index_clauses == NIL && !index->amoptionalkey)
+ return NIL;
+ }
+
+ /* We do not want the index's rel itself listed in outer_relids */
+ outer_relids = bms_del_member(outer_relids, rel->relid);
+ /* Enforce convention that outer_relids is exactly NULL if empty */
+ if (bms_is_empty(outer_relids))
+ outer_relids = NULL;
+
+ /* Compute loop_count for cost estimation purposes */
+ loop_count = get_loop_count(root, rel->relid, outer_relids);
+
+ /*
+ * 2. Compute pathkeys describing index's ordering, if any, then see how
+ * many of them are actually useful for this query. This is not relevant
+ * if we are only trying to build bitmap indexscans, nor if we have to
+ * assume the scan is unordered.
+ */
+ pathkeys_possibly_useful = (scantype != ST_BITMAPSCAN &&
+ !found_lower_saop_clause &&
+ has_useful_pathkeys(root, rel));
+ index_is_ordered = (index->sortopfamily != NULL);
+ if (index_is_ordered && pathkeys_possibly_useful)
+ {
+ index_pathkeys = build_index_pathkeys(root, index,
+ ForwardScanDirection);
+ useful_pathkeys = truncate_useless_pathkeys(root, rel,
+ index_pathkeys);
+ orderbyclauses = NIL;
+ orderbyclausecols = NIL;
+ }
+ else if (index->amcanorderbyop && pathkeys_possibly_useful)
+ {
+ /* see if we can generate ordering operators for query_pathkeys */
+ match_pathkeys_to_index(index, root->query_pathkeys,
+ &orderbyclauses,
+ &orderbyclausecols);
+ if (orderbyclauses)
+ useful_pathkeys = root->query_pathkeys;
+ else
+ useful_pathkeys = NIL;
+ }
+ else
+ {
+ useful_pathkeys = NIL;
+ orderbyclauses = NIL;
+ orderbyclausecols = NIL;
+ }
+
+ /*
+ * 3. Check if an index-only scan is possible. If we're not building
+ * plain indexscans, this isn't relevant since bitmap scans don't support
+ * index data retrieval anyway.
+ */
+ index_only_scan = (scantype != ST_BITMAPSCAN &&
+ check_index_only(rel, index));
+
+ /*
+ * 4. Generate an indexscan path if there are relevant restriction clauses
+ * in the current clauses, OR the index ordering is potentially useful for
+ * later merging or final output ordering, OR the index has a useful
+ * predicate, OR an index-only scan is possible.
+ */
+ if (index_clauses != NIL || useful_pathkeys != NIL || useful_predicate ||
+ index_only_scan)
+ {
+ ipath = create_index_path(root, index,
+ index_clauses,
+ orderbyclauses,
+ orderbyclausecols,
+ useful_pathkeys,
+ index_is_ordered ?
+ ForwardScanDirection :
+ NoMovementScanDirection,
+ index_only_scan,
+ outer_relids,
+ loop_count,
+ false);
+ result = lappend(result, ipath);
+
+ /*
+ * If appropriate, consider parallel index scan. We don't allow
+ * parallel index scan for bitmap index scans.
+ */
+ if (index->amcanparallel &&
+ rel->consider_parallel && outer_relids == NULL &&
+ scantype != ST_BITMAPSCAN)
+ {
+ ipath = create_index_path(root, index,
+ index_clauses,
+ orderbyclauses,
+ orderbyclausecols,
+ useful_pathkeys,
+ index_is_ordered ?
+ ForwardScanDirection :
+ NoMovementScanDirection,
+ index_only_scan,
+ outer_relids,
+ loop_count,
+ true);
+
+ /*
+ * if, after costing the path, we find that it's not worth using
+ * parallel workers, just free it.
+ */
+ if (ipath->path.parallel_workers > 0)
+ add_partial_path(rel, (Path *) ipath);
+ else
+ pfree(ipath);
+ }
+ }
+
+ /*
+ * 5. If the index is ordered, a backwards scan might be interesting.
+ */
+ if (index_is_ordered && pathkeys_possibly_useful)
+ {
+ index_pathkeys = build_index_pathkeys(root, index,
+ BackwardScanDirection);
+ useful_pathkeys = truncate_useless_pathkeys(root, rel,
+ index_pathkeys);
+ if (useful_pathkeys != NIL)
+ {
+ ipath = create_index_path(root, index,
+ index_clauses,
+ NIL,
+ NIL,
+ useful_pathkeys,
+ BackwardScanDirection,
+ index_only_scan,
+ outer_relids,
+ loop_count,
+ false);
+ result = lappend(result, ipath);
+
+ /* If appropriate, consider parallel index scan */
+ if (index->amcanparallel &&
+ rel->consider_parallel && outer_relids == NULL &&
+ scantype != ST_BITMAPSCAN)
+ {
+ ipath = create_index_path(root, index,
+ index_clauses,
+ NIL,
+ NIL,
+ useful_pathkeys,
+ BackwardScanDirection,
+ index_only_scan,
+ outer_relids,
+ loop_count,
+ true);
+
+ /*
+ * if, after costing the path, we find that it's not worth
+ * using parallel workers, just free it.
+ */
+ if (ipath->path.parallel_workers > 0)
+ add_partial_path(rel, (Path *) ipath);
+ else
+ pfree(ipath);
+ }
+ }
+ }
+
+ return result;
+}
+
+/*
+ * build_paths_for_OR
+ * Given a list of restriction clauses from one arm of an OR clause,
+ * construct all matching IndexPaths for the relation.
+ *
+ * Here we must scan all indexes of the relation, since a bitmap OR tree
+ * can use multiple indexes.
+ *
+ * The caller actually supplies two lists of restriction clauses: some
+ * "current" ones and some "other" ones. Both lists can be used freely
+ * to match keys of the index, but an index must use at least one of the
+ * "current" clauses to be considered usable. The motivation for this is
+ * examples like
+ * WHERE (x = 42) AND (... OR (y = 52 AND z = 77) OR ....)
+ * While we are considering the y/z subclause of the OR, we can use "x = 42"
+ * as one of the available index conditions; but we shouldn't match the
+ * subclause to any index on x alone, because such a Path would already have
+ * been generated at the upper level. So we could use an index on x,y,z
+ * or an index on x,y for the OR subclause, but not an index on just x.
+ * When dealing with a partial index, a match of the index predicate to
+ * one of the "current" clauses also makes the index usable.
+ *
+ * 'rel' is the relation for which we want to generate index paths
+ * 'clauses' is the current list of clauses (RestrictInfo nodes)
+ * 'other_clauses' is the list of additional upper-level clauses
+ */
+static List *
+build_paths_for_OR(PlannerInfo *root, RelOptInfo *rel,
+ List *clauses, List *other_clauses)
+{
+ List *result = NIL;
+ List *all_clauses = NIL; /* not computed till needed */
+ ListCell *lc;
+
+ foreach(lc, rel->indexlist)
+ {
+ IndexOptInfo *index = (IndexOptInfo *) lfirst(lc);
+ IndexClauseSet clauseset;
+ List *indexpaths;
+ bool useful_predicate;
+
+ /* Ignore index if it doesn't support bitmap scans */
+ if (!index->amhasgetbitmap)
+ continue;
+
+ /*
+ * Ignore partial indexes that do not match the query. If a partial
+ * index is marked predOK then we know it's OK. Otherwise, we have to
+ * test whether the added clauses are sufficient to imply the
+ * predicate. If so, we can use the index in the current context.
+ *
+ * We set useful_predicate to true iff the predicate was proven using
+ * the current set of clauses. This is needed to prevent matching a
+ * predOK index to an arm of an OR, which would be a legal but
+ * pointlessly inefficient plan. (A better plan will be generated by
+ * just scanning the predOK index alone, no OR.)
+ */
+ useful_predicate = false;
+ if (index->indpred != NIL)
+ {
+ if (index->predOK)
+ {
+ /* Usable, but don't set useful_predicate */
+ }
+ else
+ {
+ /* Form all_clauses if not done already */
+ if (all_clauses == NIL)
+ all_clauses = list_concat_copy(clauses, other_clauses);
+
+ if (!predicate_implied_by(index->indpred, all_clauses, false))
+ continue; /* can't use it at all */
+
+ if (!predicate_implied_by(index->indpred, other_clauses, false))
+ useful_predicate = true;
+ }
+ }
+
+ /*
+ * Identify the restriction clauses that can match the index.
+ */
+ MemSet(&clauseset, 0, sizeof(clauseset));
+ match_clauses_to_index(root, clauses, index, &clauseset);
+
+ /*
+ * If no matches so far, and the index predicate isn't useful, we
+ * don't want it.
+ */
+ if (!clauseset.nonempty && !useful_predicate)
+ continue;
+
+ /*
+ * Add "other" restriction clauses to the clauseset.
+ */
+ match_clauses_to_index(root, other_clauses, index, &clauseset);
+
+ /*
+ * Construct paths if possible.
+ */
+ indexpaths = build_index_paths(root, rel,
+ index, &clauseset,
+ useful_predicate,
+ ST_BITMAPSCAN,
+ NULL,
+ NULL);
+ result = list_concat(result, indexpaths);
+ }
+
+ return result;
+}
+
+/*
+ * generate_bitmap_or_paths
+ * Look through the list of clauses to find OR clauses, and generate
+ * a BitmapOrPath for each one we can handle that way. Return a list
+ * of the generated BitmapOrPaths.
+ *
+ * other_clauses is a list of additional clauses that can be assumed true
+ * for the purpose of generating indexquals, but are not to be searched for
+ * ORs. (See build_paths_for_OR() for motivation.)
+ */
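+/*
+ * For example (hypothetical columns): given
+ *    WHERE a = 1 OR (b = 2 AND c = 3)
+ * and usable indexes on a and on (b, c), we build a bitmap input path for the
+ * "a = 1" arm and a (possibly AND-ed) path for the "b AND c" arm and combine
+ * them under a BitmapOrPath; if any arm matches no index at all, the whole OR
+ * clause is abandoned.
+ */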
+static List *
+generate_bitmap_or_paths(PlannerInfo *root, RelOptInfo *rel,
+ List *clauses, List *other_clauses)
+{
+ List *result = NIL;
+ List *all_clauses;
+ ListCell *lc;
+
+ /*
+ * We can use both the current and other clauses as context for
+ * build_paths_for_OR; no need to remove ORs from the lists.
+ */
+ all_clauses = list_concat_copy(clauses, other_clauses);
+
+ foreach(lc, clauses)
+ {
+ RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc);
+ List *pathlist;
+ Path *bitmapqual;
+ ListCell *j;
+
+ /* Ignore RestrictInfos that aren't ORs */
+ if (!restriction_is_or_clause(rinfo))
+ continue;
+
+ /*
+ * We must be able to match at least one index to each of the arms of
+ * the OR, else we can't use it.
+ */
+ pathlist = NIL;
+ foreach(j, ((BoolExpr *) rinfo->orclause)->args)
+ {
+ Node *orarg = (Node *) lfirst(j);
+ List *indlist;
+
+ /* OR arguments should be ANDs or sub-RestrictInfos */
+ if (is_andclause(orarg))
+ {
+ List *andargs = ((BoolExpr *) orarg)->args;
+
+ indlist = build_paths_for_OR(root, rel,
+ andargs,
+ all_clauses);
+
+ /* Recurse in case there are sub-ORs */
+ indlist = list_concat(indlist,
+ generate_bitmap_or_paths(root, rel,
+ andargs,
+ all_clauses));
+ }
+ else
+ {
+ RestrictInfo *rinfo = castNode(RestrictInfo, orarg);
+ List *orargs;
+
+ Assert(!restriction_is_or_clause(rinfo));
+ orargs = list_make1(rinfo);
+
+ indlist = build_paths_for_OR(root, rel,
+ orargs,
+ all_clauses);
+ }
+
+ /*
+ * If nothing matched this arm, we can't do anything with this OR
+ * clause.
+ */
+ if (indlist == NIL)
+ {
+ pathlist = NIL;
+ break;
+ }
+
+ /*
+ * OK, pick the most promising AND combination, and add it to
+ * pathlist.
+ */
+ bitmapqual = choose_bitmap_and(root, rel, indlist);
+ pathlist = lappend(pathlist, bitmapqual);
+ }
+
+ /*
+ * If we have a match for every arm, then turn them into a
+ * BitmapOrPath, and add to result list.
+ */
+ if (pathlist != NIL)
+ {
+ bitmapqual = (Path *) create_bitmap_or_path(root, rel, pathlist);
+ result = lappend(result, bitmapqual);
+ }
+ }
+
+ return result;
+}
+
+
+/*
+ * choose_bitmap_and
+ * Given a nonempty list of bitmap paths, AND them into one path.
+ *
+ * This is a nontrivial decision since we can legally use any subset of the
+ * given path set. We want to choose a good tradeoff between selectivity
+ * and cost of computing the bitmap.
+ *
+ * The result is either a single one of the inputs, or a BitmapAndPath
+ * combining multiple inputs.
+ */
+static Path *
+choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel, List *paths)
+{
+ int npaths = list_length(paths);
+ PathClauseUsage **pathinfoarray;
+ PathClauseUsage *pathinfo;
+ List *clauselist;
+ List *bestpaths = NIL;
+ Cost bestcost = 0;
+ int i,
+ j;
+ ListCell *l;
+
+ Assert(npaths > 0); /* else caller error */
+ if (npaths == 1)
+ return (Path *) linitial(paths); /* easy case */
+
+ /*
+ * In theory we should consider every nonempty subset of the given paths.
+ * In practice that seems like overkill, given the crude nature of the
+ * estimates, not to mention the possible effects of higher-level AND and
+ * OR clauses. Moreover, it's completely impractical if there are a large
+ * number of paths, since the work would grow as O(2^N).
+ *
+ * As a heuristic, we first check for paths using exactly the same sets of
+ * WHERE clauses + index predicate conditions, and reject all but the
+ * cheapest-to-scan in any such group. This primarily gets rid of indexes
+ * that include the interesting columns but also irrelevant columns. (In
+ * situations where the DBA has gone overboard on creating variant
+ * indexes, this can make for a very large reduction in the number of
+ * paths considered further.)
+ *
+ * We then sort the surviving paths with the cheapest-to-scan first, and
+ * for each path, consider using that path alone as the basis for a bitmap
+ * scan. Then we consider bitmap AND scans formed from that path plus
+ * each subsequent (higher-cost) path, adding on a subsequent path if it
+ * results in a reduction in the estimated total scan cost. This means we
+ * consider about O(N^2) rather than O(2^N) path combinations, which is
+ * quite tolerable, especially given that N is usually reasonably small
+ * because of the prefiltering step. The cheapest of these is returned.
+ *
+ * We will only consider AND combinations in which no two indexes use the
+ * same WHERE clause. This is a bit of a kluge: it's needed because
+ * costsize.c and clausesel.c aren't very smart about redundant clauses.
+ * They will usually double-count the redundant clauses, producing a
+ * too-small selectivity that makes a redundant AND step look like it
+ * reduces the total cost. Perhaps someday that code will be smarter and
+ * we can remove this limitation. (But note that this also defends
+ * against flat-out duplicate input paths, which can happen because
+ * match_join_clauses_to_index will find the same OR join clauses that
+ * extract_restriction_or_clauses has pulled OR restriction clauses out
+ * of.)
+ *
+ * For the same reason, we reject AND combinations in which an index
+ * predicate clause duplicates another clause. Here we find it necessary
+ * to be even stricter: we'll reject a partial index if any of its
+ * predicate clauses are implied by the set of WHERE clauses and predicate
+ * clauses used so far. This covers cases such as a condition "x = 42"
+ * used with a plain index, followed by a clauseless scan of a partial
+ * index "WHERE x >= 40 AND x < 50". The partial index has been accepted
+ * only because "x = 42" was present, and so allowing it would partially
+ * double-count selectivity. (We could use predicate_implied_by on
+ * regular qual clauses too, to have a more intelligent, but much more
+ * expensive, check for redundancy --- but in most cases simple equality
+ * seems to suffice.)
+ */
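+ /*
+ * Illustrative numbers (hypothetical): with three surviving paths whose
+ * index access costs are 10, 40, and 100, we cost a heap scan driven by the
+ * 10-cost path alone, then by 10 AND 40 if that lowers the estimate, then by
+ * additionally AND-ing the 100-cost path, and similarly starting from the
+ * 40-cost and 100-cost leaders; the cheapest combination found is kept.
+ */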
+
+ /*
+ * Extract clause usage info and detect any paths that use exactly the
+ * same set of clauses; keep only the cheapest-to-scan of any such groups.
+ * The surviving paths are put into an array for qsort'ing.
+ */
+ pathinfoarray = (PathClauseUsage **)
+ palloc(npaths * sizeof(PathClauseUsage *));
+ clauselist = NIL;
+ npaths = 0;
+ foreach(l, paths)
+ {
+ Path *ipath = (Path *) lfirst(l);
+
+ pathinfo = classify_index_clause_usage(ipath, &clauselist);
+
+ /* If it's unclassifiable, treat it as distinct from all others */
+ if (pathinfo->unclassifiable)
+ {
+ pathinfoarray[npaths++] = pathinfo;
+ continue;
+ }
+
+ for (i = 0; i < npaths; i++)
+ {
+ if (!pathinfoarray[i]->unclassifiable &&
+ bms_equal(pathinfo->clauseids, pathinfoarray[i]->clauseids))
+ break;
+ }
+ if (i < npaths)
+ {
+ /* duplicate clauseids, keep the cheaper one */
+ Cost ncost;
+ Cost ocost;
+ Selectivity nselec;
+ Selectivity oselec;
+
+ cost_bitmap_tree_node(pathinfo->path, &ncost, &nselec);
+ cost_bitmap_tree_node(pathinfoarray[i]->path, &ocost, &oselec);
+ if (ncost < ocost)
+ pathinfoarray[i] = pathinfo;
+ }
+ else
+ {
+ /* not duplicate clauseids, add to array */
+ pathinfoarray[npaths++] = pathinfo;
+ }
+ }
+
+ /* If only one surviving path, we're done */
+ if (npaths == 1)
+ return pathinfoarray[0]->path;
+
+ /* Sort the surviving paths by index access cost */
+ qsort(pathinfoarray, npaths, sizeof(PathClauseUsage *),
+ path_usage_comparator);
+
+ /*
+ * For each surviving index, consider it as an "AND group leader", and see
+ * whether adding on any of the later indexes results in an AND path with
+ * cheaper total cost than before. Then take the cheapest AND group.
+ *
+ * Note: paths that are either clauseless or unclassifiable will have
+ * empty clauseids, so that they will not be rejected by the clauseids
+ * filter here, nor will they cause later paths to be rejected by it.
+ */
+ for (i = 0; i < npaths; i++)
+ {
+ Cost costsofar;
+ List *qualsofar;
+ Bitmapset *clauseidsofar;
+
+ pathinfo = pathinfoarray[i];
+ paths = list_make1(pathinfo->path);
+ costsofar = bitmap_scan_cost_est(root, rel, pathinfo->path);
+ qualsofar = list_concat_copy(pathinfo->quals, pathinfo->preds);
+ clauseidsofar = bms_copy(pathinfo->clauseids);
+
+ for (j = i + 1; j < npaths; j++)
+ {
+ Cost newcost;
+
+ pathinfo = pathinfoarray[j];
+ /* Check for redundancy */
+ if (bms_overlap(pathinfo->clauseids, clauseidsofar))
+ continue; /* consider it redundant */
+ if (pathinfo->preds)
+ {
+ bool redundant = false;
+
+ /* we check each predicate clause separately */
+ foreach(l, pathinfo->preds)
+ {
+ Node *np = (Node *) lfirst(l);
+
+ if (predicate_implied_by(list_make1(np), qualsofar, false))
+ {
+ redundant = true;
+ break; /* out of inner foreach loop */
+ }
+ }
+ if (redundant)
+ continue;
+ }
+ /* tentatively add new path to paths, so we can estimate cost */
+ paths = lappend(paths, pathinfo->path);
+ newcost = bitmap_and_cost_est(root, rel, paths);
+ if (newcost < costsofar)
+ {
+ /* keep new path in paths, update subsidiary variables */
+ costsofar = newcost;
+ qualsofar = list_concat(qualsofar, pathinfo->quals);
+ qualsofar = list_concat(qualsofar, pathinfo->preds);
+ clauseidsofar = bms_add_members(clauseidsofar,
+ pathinfo->clauseids);
+ }
+ else
+ {
+ /* reject new path, remove it from paths list */
+ paths = list_truncate(paths, list_length(paths) - 1);
+ }
+ }
+
+ /* Keep the cheapest AND-group (or singleton) */
+ if (i == 0 || costsofar < bestcost)
+ {
+ bestpaths = paths;
+ bestcost = costsofar;
+ }
+
+ /* some easy cleanup (we don't try real hard though) */
+ list_free(qualsofar);
+ }
+
+ if (list_length(bestpaths) == 1)
+ return (Path *) linitial(bestpaths); /* no need for AND */
+ return (Path *) create_bitmap_and_path(root, rel, bestpaths);
+}
+
+/* qsort comparator to sort in increasing index access cost order */
+static int
+path_usage_comparator(const void *a, const void *b)
+{
+ PathClauseUsage *pa = *(PathClauseUsage *const *) a;
+ PathClauseUsage *pb = *(PathClauseUsage *const *) b;
+ Cost acost;
+ Cost bcost;
+ Selectivity aselec;
+ Selectivity bselec;
+
+ cost_bitmap_tree_node(pa->path, &acost, &aselec);
+ cost_bitmap_tree_node(pb->path, &bcost, &bselec);
+
+ /*
+ * If costs are the same, sort by selectivity.
+ */
+ if (acost < bcost)
+ return -1;
+ if (acost > bcost)
+ return 1;
+
+ if (aselec < bselec)
+ return -1;
+ if (aselec > bselec)
+ return 1;
+
+ return 0;
+}
+
+/*
+ * Estimate the cost of actually executing a bitmap scan with a single
+ * index path (which could be a BitmapAnd or BitmapOr node).
+ */
+static Cost
+bitmap_scan_cost_est(PlannerInfo *root, RelOptInfo *rel, Path *ipath)
+{
+ BitmapHeapPath bpath;
+
+ /* Set up a dummy BitmapHeapPath */
+ bpath.path.type = T_BitmapHeapPath;
+ bpath.path.pathtype = T_BitmapHeapScan;
+ bpath.path.parent = rel;
+ bpath.path.pathtarget = rel->reltarget;
+ bpath.path.param_info = ipath->param_info;
+ bpath.path.pathkeys = NIL;
+ bpath.bitmapqual = ipath;
+
+ /*
+ * Check the cost of the temporary path without considering parallelism;
+ * a parallel bitmap heap path will be considered at a later stage.
+ */
+ bpath.path.parallel_workers = 0;
+
+ /* Now we can do cost_bitmap_heap_scan */
+ cost_bitmap_heap_scan(&bpath.path, root, rel,
+ bpath.path.param_info,
+ ipath,
+ get_loop_count(root, rel->relid,
+ PATH_REQ_OUTER(ipath)));
+
+ return bpath.path.total_cost;
+}
+
+/*
+ * Estimate the cost of actually executing a BitmapAnd scan with the given
+ * inputs.
+ */
+static Cost
+bitmap_and_cost_est(PlannerInfo *root, RelOptInfo *rel, List *paths)
+{
+ BitmapAndPath *apath;
+
+ /*
+ * Might as well build a real BitmapAndPath here, as the work is slightly
+ * too complicated to be worth repeating just to save one palloc.
+ */
+ apath = create_bitmap_and_path(root, rel, paths);
+
+ return bitmap_scan_cost_est(root, rel, (Path *) apath);
+}
+
+
+/*
+ * classify_index_clause_usage
+ * Construct a PathClauseUsage struct describing the WHERE clauses and
+ * index predicate clauses used by the given indexscan path.
+ * We consider two clauses the same if they are equal().
+ *
+ * At some point we might want to migrate this info into the Path data
+ * structure proper, but for the moment it's only needed within
+ * choose_bitmap_and().
+ *
+ * *clauselist is used and expanded as needed to identify all the distinct
+ * clauses seen across successive calls. Caller must initialize it to NIL
+ * before first call of a set.
+ */
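+/*
+ * For example, if two input paths both use the clause "x = 42" (hypothetical),
+ * find_list_position assigns that clause the same position in *clauselist on
+ * both calls, so the same bit appears in both paths' clauseids and
+ * choose_bitmap_and treats the paths as overlapping rather than independent.
+ */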
+static PathClauseUsage *
+classify_index_clause_usage(Path *path, List **clauselist)
+{
+ PathClauseUsage *result;
+ Bitmapset *clauseids;
+ ListCell *lc;
+
+ result = (PathClauseUsage *) palloc(sizeof(PathClauseUsage));
+ result->path = path;
+
+ /* Recursively find the quals and preds used by the path */
+ result->quals = NIL;
+ result->preds = NIL;
+ find_indexpath_quals(path, &result->quals, &result->preds);
+
+ /*
+ * Some machine-generated queries have outlandish numbers of qual clauses.
+ * To avoid getting into O(N^2) behavior even in this preliminary
+ * classification step, we want to limit the number of entries we can
+ * accumulate in *clauselist. Treat any path with more than 100 quals +
+ * preds as unclassifiable, which will cause calling code to consider it
+ * distinct from all other paths.
+ */
+ if (list_length(result->quals) + list_length(result->preds) > 100)
+ {
+ result->clauseids = NULL;
+ result->unclassifiable = true;
+ return result;
+ }
+
+ /* Build up a bitmapset representing the quals and preds */
+ clauseids = NULL;
+ foreach(lc, result->quals)
+ {
+ Node *node = (Node *) lfirst(lc);
+
+ clauseids = bms_add_member(clauseids,
+ find_list_position(node, clauselist));
+ }
+ foreach(lc, result->preds)
+ {
+ Node *node = (Node *) lfirst(lc);
+
+ clauseids = bms_add_member(clauseids,
+ find_list_position(node, clauselist));
+ }
+ result->clauseids = clauseids;
+ result->unclassifiable = false;
+
+ return result;
+}
+
+
+/*
+ * find_indexpath_quals
+ *
+ * Given the Path structure for a plain or bitmap indexscan, extract lists
+ * of all the index clauses and index predicate conditions used in the Path.
+ * These are appended to the initial contents of *quals and *preds (hence
+ * caller should initialize those to NIL).
+ *
+ * Note we are not trying to produce an accurate representation of the AND/OR
+ * semantics of the Path, but just find out all the base conditions used.
+ *
+ * The result lists contain pointers to the expressions used in the Path,
+ * but all the list cells are freshly built, so it's safe to destructively
+ * modify the lists (eg, by concat'ing with other lists).
+ */
+static void
+find_indexpath_quals(Path *bitmapqual, List **quals, List **preds)
+{
+ if (IsA(bitmapqual, BitmapAndPath))
+ {
+ BitmapAndPath *apath = (BitmapAndPath *) bitmapqual;
+ ListCell *l;
+
+ foreach(l, apath->bitmapquals)
+ {
+ find_indexpath_quals((Path *) lfirst(l), quals, preds);
+ }
+ }
+ else if (IsA(bitmapqual, BitmapOrPath))
+ {
+ BitmapOrPath *opath = (BitmapOrPath *) bitmapqual;
+ ListCell *l;
+
+ foreach(l, opath->bitmapquals)
+ {
+ find_indexpath_quals((Path *) lfirst(l), quals, preds);
+ }
+ }
+ else if (IsA(bitmapqual, IndexPath))
+ {
+ IndexPath *ipath = (IndexPath *) bitmapqual;
+ ListCell *l;
+
+ foreach(l, ipath->indexclauses)
+ {
+ IndexClause *iclause = (IndexClause *) lfirst(l);
+
+ *quals = lappend(*quals, iclause->rinfo->clause);
+ }
+ *preds = list_concat(*preds, ipath->indexinfo->indpred);
+ }
+ else
+ elog(ERROR, "unrecognized node type: %d", nodeTag(bitmapqual));
+}
+
+
+/*
+ * find_list_position
+ * Return the given node's position (counting from 0) in the given
+ * list of nodes. If it's not equal() to any existing list member,
+ * add it at the end, and return that position.
+ */
+static int
+find_list_position(Node *node, List **nodelist)
+{
+ int i;
+ ListCell *lc;
+
+ i = 0;
+ foreach(lc, *nodelist)
+ {
+ Node *oldnode = (Node *) lfirst(lc);
+
+ if (equal(node, oldnode))
+ return i;
+ i++;
+ }
+
+ *nodelist = lappend(*nodelist, node);
+
+ return i;
+}
+
+
+/*
+ * check_index_only
+ * Determine whether an index-only scan is possible for this index.
+ */
+static bool
+check_index_only(RelOptInfo *rel, IndexOptInfo *index)
+{
+ bool result;
+ Bitmapset *attrs_used = NULL;
+ Bitmapset *index_canreturn_attrs = NULL;
+ ListCell *lc;
+ int i;
+
+ /* Index-only scans must be enabled */
+ if (!enable_indexonlyscan)
+ return false;
+
+ /*
+ * Check that all needed attributes of the relation are available from the
+ * index.
+ */
+
+ /*
+ * First, identify all the attributes needed for joins or final output.
+ * Note: we must look at rel's targetlist, not the attr_needed data,
+ * because attr_needed isn't computed for inheritance child rels.
+ */
+ pull_varattnos((Node *) rel->reltarget->exprs, rel->relid, &attrs_used);
+
+ /*
+ * Add all the attributes used by restriction clauses; but consider only
+ * those clauses not implied by the index predicate, since ones that are
+ * so implied don't need to be checked explicitly in the plan.
+ *
+ * Note: attributes used only in index quals would not be needed at
+ * runtime either, if we are certain that the index is not lossy. However
+ * it'd be complicated to account for that accurately, and it doesn't
+ * matter in most cases, since we'd conclude that such attributes are
+ * available from the index anyway.
+ */
+ foreach(lc, index->indrestrictinfo)
+ {
+ RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
+
+ pull_varattnos((Node *) rinfo->clause, rel->relid, &attrs_used);
+ }
+
+ /*
+ * Construct a bitmapset of columns that the index can return back in an
+ * index-only scan.
+ */
+ for (i = 0; i < index->ncolumns; i++)
+ {
+ int attno = index->indexkeys[i];
+
+ /*
+ * For the moment, we just ignore index expressions. It might be nice
+ * to do something with them later.
+ */
+ if (attno == 0)
+ continue;
+
+ if (index->canreturn[i])
+ index_canreturn_attrs =
+ bms_add_member(index_canreturn_attrs,
+ attno - FirstLowInvalidHeapAttributeNumber);
+ }
+
+ /* Do we have all the necessary attributes? */
+ result = bms_is_subset(attrs_used, index_canreturn_attrs);
+
+ bms_free(attrs_used);
+ bms_free(index_canreturn_attrs);
+
+ return result;
+}
+
+/*
+ * get_loop_count
+ * Choose the loop count estimate to use for costing a parameterized path
+ * with the given set of outer relids.
+ *
+ * Since we produce parameterized paths before we've begun to generate join
+ * relations, it's impossible to predict exactly how many times a parameterized
+ * path will be iterated; we don't know the size of the relation that will be
+ * on the outside of the nestloop. However, we should try to account for
+ * multiple iterations somehow in costing the path. The heuristic embodied
+ * here is to use the rowcount of the smallest other base relation needed in
+ * the join clauses used by the path. (We could alternatively consider the
+ * largest one, but that seems too optimistic.) This is of course the right
+ * answer for single-other-relation cases, and it seems like a reasonable
+ * zero-order approximation for multiway-join cases.
+ *
+ * In addition, we check to see if the other side of each join clause is on
+ * the inside of some semijoin that the current relation is on the outside of.
+ * If so, the only way that a parameterized path could be used is if the
+ * semijoin RHS has been unique-ified, so we should use the number of unique
+ * RHS rows rather than using the relation's raw rowcount.
+ *
+ * Note: for this to work, allpaths.c must establish all baserel size
+ * estimates before it begins to compute paths, or at least before it
+ * calls create_index_paths().
+ */
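+/*
+ * For example (hypothetical estimates): if the path's join clauses reference
+ * outer rels with row estimates of 1000 and 20, the loop count used for
+ * costing is 20, possibly reduced further by adjust_rowcount_for_semijoins
+ * if that rel is the unique-ified RHS of a semijoin.
+ */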
+static double
+get_loop_count(PlannerInfo *root, Index cur_relid, Relids outer_relids)
+{
+ double result;
+ int outer_relid;
+
+ /* For a non-parameterized path, just return 1.0 quickly */
+ if (outer_relids == NULL)
+ return 1.0;
+
+ result = 0.0;
+ outer_relid = -1;
+ while ((outer_relid = bms_next_member(outer_relids, outer_relid)) >= 0)
+ {
+ RelOptInfo *outer_rel;
+ double rowcount;
+
+ /* Paranoia: ignore bogus relid indexes */
+ if (outer_relid >= root->simple_rel_array_size)
+ continue;
+ outer_rel = root->simple_rel_array[outer_relid];
+ if (outer_rel == NULL)
+ continue;
+ Assert(outer_rel->relid == outer_relid); /* sanity check on array */
+
+ /* Other relation could be proven empty, if so ignore */
+ if (IS_DUMMY_REL(outer_rel))
+ continue;
+
+ /* Otherwise, rel's rows estimate should be valid by now */
+ Assert(outer_rel->rows > 0);
+
+ /* Check to see if rel is on the inside of any semijoins */
+ rowcount = adjust_rowcount_for_semijoins(root,
+ cur_relid,
+ outer_relid,
+ outer_rel->rows);
+
+ /* Remember smallest row count estimate among the outer rels */
+ if (result == 0.0 || result > rowcount)
+ result = rowcount;
+ }
+ /* Return 1.0 if we found no valid relations (shouldn't happen) */
+ return (result > 0.0) ? result : 1.0;
+}
+
+/*
+ * Check to see if outer_relid is on the inside of any semijoin that cur_relid
+ * is on the outside of. If so, replace rowcount with the estimated number of
+ * unique rows from the semijoin RHS (assuming that's smaller, which it might
+ * not be). The estimate is crude but it's the best we can do at this stage
+ * of the proceedings.
+ */
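+/*
+ * For example (hypothetical numbers): if cur_relid is on the outside of an
+ * EXISTS-style semijoin whose RHS rel has 10000 rows but only 500 distinct
+ * values of the semijoin key, the rowcount used for loop-count purposes is
+ * clamped to 500.
+ */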
+static double
+adjust_rowcount_for_semijoins(PlannerInfo *root,
+ Index cur_relid,
+ Index outer_relid,
+ double rowcount)
+{
+ ListCell *lc;
+
+ foreach(lc, root->join_info_list)
+ {
+ SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) lfirst(lc);
+
+ if (sjinfo->jointype == JOIN_SEMI &&
+ bms_is_member(cur_relid, sjinfo->syn_lefthand) &&
+ bms_is_member(outer_relid, sjinfo->syn_righthand))
+ {
+ /* Estimate number of unique-ified rows */
+ double nraw;
+ double nunique;
+
+ nraw = approximate_joinrel_size(root, sjinfo->syn_righthand);
+ nunique = estimate_num_groups(root,
+ sjinfo->semi_rhs_exprs,
+ nraw,
+ NULL,
+ NULL);
+ if (rowcount > nunique)
+ rowcount = nunique;
+ }
+ }
+ return rowcount;
+}
+
+/*
+ * Make an approximate estimate of the size of a joinrel.
+ *
+ * We don't have enough info at this point to get a good estimate, so we
+ * just multiply the base relation sizes together. Fortunately, this is
+ * the right answer anyway for the most common case with a single relation
+ * on the RHS of a semijoin. Also, estimate_num_groups() has only a weak
+ * dependency on its input_rows argument (it basically uses it as a clamp).
+ * So we might be able to get a fairly decent end result even with a severe
+ * overestimate of the RHS's raw size.
+ */
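+/*
+ * For example (hypothetical estimates): for relids covering two base rels
+ * with 1000 and 50 estimated rows, the approximation returned is 50000.
+ */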
+static double
+approximate_joinrel_size(PlannerInfo *root, Relids relids)
+{
+ double rowcount = 1.0;
+ int relid;
+
+ relid = -1;
+ while ((relid = bms_next_member(relids, relid)) >= 0)
+ {
+ RelOptInfo *rel;
+
+ /* Paranoia: ignore bogus relid indexes */
+ if (relid >= root->simple_rel_array_size)
+ continue;
+ rel = root->simple_rel_array[relid];
+ if (rel == NULL)
+ continue;
+ Assert(rel->relid == relid); /* sanity check on array */
+
+ /* Relation could be proven empty, if so ignore */
+ if (IS_DUMMY_REL(rel))
+ continue;
+
+ /* Otherwise, rel's rows estimate should be valid by now */
+ Assert(rel->rows > 0);
+
+ /* Accumulate product */
+ rowcount *= rel->rows;
+ }
+ return rowcount;
+}
+
+
+/****************************************************************************
+ * ---- ROUTINES TO CHECK QUERY CLAUSES ----
+ ****************************************************************************/
+
+/*
+ * match_restriction_clauses_to_index
+ * Identify restriction clauses for the rel that match the index.
+ * Matching clauses are added to *clauseset.
+ */
+static void
+match_restriction_clauses_to_index(PlannerInfo *root,
+ IndexOptInfo *index,
+ IndexClauseSet *clauseset)
+{
+ /* We can ignore clauses that are implied by the index predicate */
+ match_clauses_to_index(root, index->indrestrictinfo, index, clauseset);
+}
+
+/*
+ * match_join_clauses_to_index
+ * Identify join clauses for the rel that match the index.
+ * Matching clauses are added to *clauseset.
+ * Also, add any potentially usable join OR clauses to *joinorclauses.
+ */
+static void
+match_join_clauses_to_index(PlannerInfo *root,
+ RelOptInfo *rel, IndexOptInfo *index,
+ IndexClauseSet *clauseset,
+ List **joinorclauses)
+{
+ ListCell *lc;
+
+ /* Scan the rel's join clauses */
+ foreach(lc, rel->joininfo)
+ {
+ RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
+
+ /* Check if clause can be moved to this rel */
+ if (!join_clause_is_movable_to(rinfo, rel))
+ continue;
+
+ /* Potentially usable, so see if it matches the index or is an OR */
+ if (restriction_is_or_clause(rinfo))
+ *joinorclauses = lappend(*joinorclauses, rinfo);
+ else
+ match_clause_to_index(root, rinfo, index, clauseset);
+ }
+}
+
+/*
+ * match_eclass_clauses_to_index
+ * Identify EquivalenceClass join clauses for the rel that match the index.
+ * Matching clauses are added to *clauseset.
+ */
+static void
+match_eclass_clauses_to_index(PlannerInfo *root, IndexOptInfo *index,
+ IndexClauseSet *clauseset)
+{
+ int indexcol;
+
+ /* No work if rel is not in any such ECs */
+ if (!index->rel->has_eclass_joins)
+ return;
+
+ for (indexcol = 0; indexcol < index->nkeycolumns; indexcol++)
+ {
+ ec_member_matches_arg arg;
+ List *clauses;
+
+ /* Generate clauses, skipping any that join to lateral_referencers */
+ arg.index = index;
+ arg.indexcol = indexcol;
+ clauses = generate_implied_equalities_for_column(root,
+ index->rel,
+ ec_member_matches_indexcol,
+ (void *) &arg,
+ index->rel->lateral_referencers);
+
+ /*
+ * We have to check whether the results actually do match the index,
+ * since for non-btree indexes the EC's equality operators might not
+ * be in the index opclass (cf ec_member_matches_indexcol).
+ */
+ match_clauses_to_index(root, clauses, index, clauseset);
+ }
+}
+
+/*
+ * match_clauses_to_index
+ * Perform match_clause_to_index() for each clause in a list.
+ * Matching clauses are added to *clauseset.
+ */
+static void
+match_clauses_to_index(PlannerInfo *root,
+ List *clauses,
+ IndexOptInfo *index,
+ IndexClauseSet *clauseset)
+{
+ ListCell *lc;
+
+ foreach(lc, clauses)
+ {
+ RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc);
+
+ match_clause_to_index(root, rinfo, index, clauseset);
+ }
+}
+
+/*
+ * match_clause_to_index
+ * Test whether a qual clause can be used with an index.
+ *
+ * If the clause is usable, add an IndexClause entry for it to the appropriate
+ * list in *clauseset. (*clauseset must be initialized to zeroes before first
+ * call.)
+ *
+ * Note: in some circumstances we may find the same RestrictInfos coming from
+ * multiple places. Defend against redundant outputs by refusing to add a
+ * clause twice (pointer equality should be a good enough check for this).
+ *
+ * Note: it's possible that a badly-defined index could have multiple matching
+ * columns. We always select the first match if so; this avoids scenarios
+ * wherein we get an inflated idea of the index's selectivity by using the
+ * same clause multiple times with different index columns.
+ */
+static void
+match_clause_to_index(PlannerInfo *root,
+ RestrictInfo *rinfo,
+ IndexOptInfo *index,
+ IndexClauseSet *clauseset)
+{
+ int indexcol;
+
+ /*
+ * Never match pseudoconstants to indexes. (Normally a match could not
+ * happen anyway, since a pseudoconstant clause couldn't contain a Var,
+ * but what if someone builds an expression index on a constant? It's not
+ * totally unreasonable to do so with a partial index, either.)
+ */
+ if (rinfo->pseudoconstant)
+ return;
+
+ /*
+ * If clause can't be used as an indexqual because it must wait till after
+ * some lower-security-level restriction clause, reject it.
+ */
+ if (!restriction_is_securely_promotable(rinfo, index->rel))
+ return;
+
+ /* OK, check each index key column for a match */
+ for (indexcol = 0; indexcol < index->nkeycolumns; indexcol++)
+ {
+ IndexClause *iclause;
+ ListCell *lc;
+
+ /* Ignore duplicates */
+ foreach(lc, clauseset->indexclauses[indexcol])
+ {
+			iclause = (IndexClause *) lfirst(lc);
+
+ if (iclause->rinfo == rinfo)
+ return;
+ }
+
+ /* OK, try to match the clause to the index column */
+ iclause = match_clause_to_indexcol(root,
+ rinfo,
+ indexcol,
+ index);
+ if (iclause)
+ {
+ /* Success, so record it */
+ clauseset->indexclauses[indexcol] =
+ lappend(clauseset->indexclauses[indexcol], iclause);
+ clauseset->nonempty = true;
+ return;
+ }
+ }
+}
+
+/*
+ * match_clause_to_indexcol()
+ * Determine whether a restriction clause matches a column of an index,
+ * and if so, build an IndexClause node describing the details.
+ *
+ * To match an index normally, an operator clause:
+ *
+ * (1) must be in the form (indexkey op const) or (const op indexkey);
+ * and
+ * (2) must contain an operator which is in the index's operator family
+ * for this column; and
+ * (3) must match the collation of the index, if collation is relevant.
+ *
+ * Our definition of "const" is exceedingly liberal: we allow anything that
+ * doesn't involve a volatile function or a Var of the index's relation.
+ * In particular, Vars belonging to other relations of the query are
+ * accepted here, since a clause of that form can be used in a
+ * parameterized indexscan. It's the responsibility of higher code levels
+ * to manage restriction and join clauses appropriately.
+ *
+ * Note: we do need to check for Vars of the index's relation on the
+ * "const" side of the clause, since clauses like (a.f1 OP (b.f2 OP a.f3))
+ * are not processable by a parameterized indexscan on a.f1, whereas
+ * something like (a.f1 OP (b.f2 OP c.f3)) is.
+ *
+ * Presently, the executor can only deal with indexquals that have the
+ * indexkey on the left, so we can only use clauses that have the indexkey
+ * on the right if we can commute the clause to put the key on the left.
+ * We handle that by generating an IndexClause with the correctly-commuted
+ * opclause as a derived indexqual.
+ *
+ * If the index has a collation, the clause must have the same collation.
+ * For collation-less indexes, we assume it doesn't matter; this is
+ * necessary for cases like "hstore ? text", wherein hstore's operators
+ * don't care about collation but the clause will get marked with a
+ * collation anyway because of the text argument. (This logic is
+ * embodied in the macro IndexCollMatchesExprColl.)
+ *
+ * It is also possible to match RowCompareExpr clauses to indexes (but
+ * currently, only btree indexes handle this).
+ *
+ * It is also possible to match ScalarArrayOpExpr clauses to indexes, when
+ * the clause is of the form "indexkey op ANY (arrayconst)".
+ *
+ * For boolean indexes, it is also possible to match the clause directly
+ * to the indexkey; or perhaps the clause is (NOT indexkey).
+ *
+ * And, last but not least, some operators and functions can be processed
+ * to derive (typically lossy) indexquals from a clause that isn't in
+ * itself indexable. If we see that any operand of an OpExpr or FuncExpr
+ * matches the index key, and the function has a planner support function
+ * attached to it, we'll invoke the support function to see if such an
+ * indexqual can be built.
+ *
+ * 'rinfo' is the clause to be tested (as a RestrictInfo node).
+ * 'indexcol' is a column number of 'index' (counting from 0).
+ * 'index' is the index of interest.
+ *
+ * Returns an IndexClause if the clause can be used with this index key,
+ * or NULL if not.
+ *
+ * NOTE: returns NULL if clause is an OR or AND clause; it is the
+ * responsibility of higher-level routines to cope with those.
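+ *
+ * For example (using illustrative names), with an index on t(a), clauses such
+ * as "a = 42", "42 < a" (commuted so the indexkey ends up on the left),
+ * "a = other_rel.x" (usable in a parameterized indexscan),
+ * "a = ANY ('{1,2,3}'::int[])", or "a IS NULL" (if the index has
+ * amsearchnulls) can all produce an IndexClause, assuming the operators
+ * involved belong to the index column's opfamily.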
+ */
+static IndexClause *
+match_clause_to_indexcol(PlannerInfo *root,
+ RestrictInfo *rinfo,
+ int indexcol,
+ IndexOptInfo *index)
+{
+ IndexClause *iclause;
+ Expr *clause = rinfo->clause;
+ Oid opfamily;
+
+ Assert(indexcol < index->nkeycolumns);
+
+ /*
+ * Historically this code has coped with NULL clauses. That's probably
+ * not possible anymore, but we might as well continue to cope.
+ */
+ if (clause == NULL)
+ return NULL;
+
+ /* First check for boolean-index cases. */
+ opfamily = index->opfamily[indexcol];
+ if (IsBooleanOpfamily(opfamily))
+ {
+ iclause = match_boolean_index_clause(root, rinfo, indexcol, index);
+ if (iclause)
+ return iclause;
+ }
+
+ /*
+ * Clause must be an opclause, funcclause, ScalarArrayOpExpr, or
+ * RowCompareExpr. Or, if the index supports it, we can handle IS
+ * NULL/NOT NULL clauses.
+ */
+ if (IsA(clause, OpExpr))
+ {
+ return match_opclause_to_indexcol(root, rinfo, indexcol, index);
+ }
+ else if (IsA(clause, FuncExpr))
+ {
+ return match_funcclause_to_indexcol(root, rinfo, indexcol, index);
+ }
+ else if (IsA(clause, ScalarArrayOpExpr))
+ {
+ return match_saopclause_to_indexcol(root, rinfo, indexcol, index);
+ }
+ else if (IsA(clause, RowCompareExpr))
+ {
+ return match_rowcompare_to_indexcol(root, rinfo, indexcol, index);
+ }
+ else if (index->amsearchnulls && IsA(clause, NullTest))
+ {
+ NullTest *nt = (NullTest *) clause;
+
+ if (!nt->argisrow &&
+ match_index_to_operand((Node *) nt->arg, indexcol, index))
+ {
+ iclause = makeNode(IndexClause);
+ iclause->rinfo = rinfo;
+ iclause->indexquals = list_make1(rinfo);
+ iclause->lossy = false;
+ iclause->indexcol = indexcol;
+ iclause->indexcols = NIL;
+ return iclause;
+ }
+ }
+
+ return NULL;
+}
+
+/*
+ * match_boolean_index_clause
+ * Recognize restriction clauses that can be matched to a boolean index.
+ *
+ * The idea here is that, for an index on a boolean column that supports the
+ * BooleanEqualOperator, we can transform a plain reference to the indexkey
+ * into "indexkey = true", or "NOT indexkey" into "indexkey = false", etc,
+ * so as to make the expression indexable using the index's "=" operator.
+ * Since Postgres 8.1, we must do this because constant simplification does
+ * the reverse transformation; without this code there'd be no way to use
+ * such an index at all.
+ *
+ * This should be called only when IsBooleanOpfamily() recognizes the
+ * index's operator family. We check to see if the clause matches the
+ * index's key, and if so, build a suitable IndexClause.
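+ *
+ * Concretely, for an index on a boolean column b, the top-level WHERE clauses
+ * "b", "NOT b", "b IS TRUE", and "b IS FALSE" are matched here and converted
+ * to "b = true", "b = false", "b = true", and "b = false" respectively.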
+ */
+static IndexClause *
+match_boolean_index_clause(PlannerInfo *root,
+ RestrictInfo *rinfo,
+ int indexcol,
+ IndexOptInfo *index)
+{
+ Node *clause = (Node *) rinfo->clause;
+ Expr *op = NULL;
+
+ /* Direct match? */
+ if (match_index_to_operand(clause, indexcol, index))
+ {
+ /* convert to indexkey = TRUE */
+ op = make_opclause(BooleanEqualOperator, BOOLOID, false,
+ (Expr *) clause,
+ (Expr *) makeBoolConst(true, false),
+ InvalidOid, InvalidOid);
+ }
+ /* NOT clause? */
+ else if (is_notclause(clause))
+ {
+ Node *arg = (Node *) get_notclausearg((Expr *) clause);
+
+ if (match_index_to_operand(arg, indexcol, index))
+ {
+ /* convert to indexkey = FALSE */
+ op = make_opclause(BooleanEqualOperator, BOOLOID, false,
+ (Expr *) arg,
+ (Expr *) makeBoolConst(false, false),
+ InvalidOid, InvalidOid);
+ }
+ }
+
+ /*
+ * Since we only consider clauses at top level of WHERE, we can convert
+ * indexkey IS TRUE and indexkey IS FALSE to index searches as well. The
+ * different meaning for NULL isn't important.
+ */
+ else if (clause && IsA(clause, BooleanTest))
+ {
+ BooleanTest *btest = (BooleanTest *) clause;
+ Node *arg = (Node *) btest->arg;
+
+ if (btest->booltesttype == IS_TRUE &&
+ match_index_to_operand(arg, indexcol, index))
+ {
+ /* convert to indexkey = TRUE */
+ op = make_opclause(BooleanEqualOperator, BOOLOID, false,
+ (Expr *) arg,
+ (Expr *) makeBoolConst(true, false),
+ InvalidOid, InvalidOid);
+ }
+ else if (btest->booltesttype == IS_FALSE &&
+ match_index_to_operand(arg, indexcol, index))
+ {
+ /* convert to indexkey = FALSE */
+ op = make_opclause(BooleanEqualOperator, BOOLOID, false,
+ (Expr *) arg,
+ (Expr *) makeBoolConst(false, false),
+ InvalidOid, InvalidOid);
+ }
+ }
+
+ /*
+ * If we successfully made an operator clause from the given qual, we must
+ * wrap it in an IndexClause. It's not lossy.
+ */
+ if (op)
+ {
+ IndexClause *iclause = makeNode(IndexClause);
+
+ iclause->rinfo = rinfo;
+ iclause->indexquals = list_make1(make_simple_restrictinfo(root, op));
+ iclause->lossy = false;
+ iclause->indexcol = indexcol;
+ iclause->indexcols = NIL;
+ return iclause;
+ }
+
+ return NULL;
+}
+
+/*
+ * match_opclause_to_indexcol()
+ * Handles the OpExpr case for match_clause_to_indexcol(),
+ * which see for comments.
+ */
+static IndexClause *
+match_opclause_to_indexcol(PlannerInfo *root,
+ RestrictInfo *rinfo,
+ int indexcol,
+ IndexOptInfo *index)
+{
+ IndexClause *iclause;
+ OpExpr *clause = (OpExpr *) rinfo->clause;
+ Node *leftop,
+ *rightop;
+ Oid expr_op;
+ Oid expr_coll;
+ Index index_relid;
+ Oid opfamily;
+ Oid idxcollation;
+
+ /*
+ * Only binary operators need apply. (In theory, a planner support
+ * function could do something with a unary operator, but it seems
+ * unlikely to be worth the cycles to check.)
+ */
+ if (list_length(clause->args) != 2)
+ return NULL;
+
+ leftop = (Node *) linitial(clause->args);
+ rightop = (Node *) lsecond(clause->args);
+ expr_op = clause->opno;
+ expr_coll = clause->inputcollid;
+
+ index_relid = index->rel->relid;
+ opfamily = index->opfamily[indexcol];
+ idxcollation = index->indexcollations[indexcol];
+
+ /*
+ * Check for clauses of the form: (indexkey operator constant) or
+ * (constant operator indexkey). See match_clause_to_indexcol's notes
+ * about const-ness.
+ *
+ * Note that we don't ask the support function about clauses that don't
+ * have one of these forms. Again, in principle it might be possible to
+ * do something, but it seems unlikely to be worth the cycles to check.
+ */
+ if (match_index_to_operand(leftop, indexcol, index) &&
+ !bms_is_member(index_relid, rinfo->right_relids) &&
+ !contain_volatile_functions(rightop))
+ {
+ if (IndexCollMatchesExprColl(idxcollation, expr_coll) &&
+ op_in_opfamily(expr_op, opfamily))
+ {
+ iclause = makeNode(IndexClause);
+ iclause->rinfo = rinfo;
+ iclause->indexquals = list_make1(rinfo);
+ iclause->lossy = false;
+ iclause->indexcol = indexcol;
+ iclause->indexcols = NIL;
+ return iclause;
+ }
+
+ /*
+ * If we didn't find a member of the index's opfamily, try the support
+ * function for the operator's underlying function.
+ */
+ set_opfuncid(clause); /* make sure we have opfuncid */
+ return get_index_clause_from_support(root,
+ rinfo,
+ clause->opfuncid,
+ 0, /* indexarg on left */
+ indexcol,
+ index);
+ }
+
+ if (match_index_to_operand(rightop, indexcol, index) &&
+ !bms_is_member(index_relid, rinfo->left_relids) &&
+ !contain_volatile_functions(leftop))
+ {
+ if (IndexCollMatchesExprColl(idxcollation, expr_coll))
+ {
+ Oid comm_op = get_commutator(expr_op);
+
+ if (OidIsValid(comm_op) &&
+ op_in_opfamily(comm_op, opfamily))
+ {
+ RestrictInfo *commrinfo;
+
+ /* Build a commuted OpExpr and RestrictInfo */
+ commrinfo = commute_restrictinfo(rinfo, comm_op);
+
+ /* Make an IndexClause showing that as a derived qual */
+ iclause = makeNode(IndexClause);
+ iclause->rinfo = rinfo;
+ iclause->indexquals = list_make1(commrinfo);
+ iclause->lossy = false;
+ iclause->indexcol = indexcol;
+ iclause->indexcols = NIL;
+ return iclause;
+ }
+ }
+
+ /*
+ * If we didn't find a member of the index's opfamily, try the support
+ * function for the operator's underlying function.
+ */
+ set_opfuncid(clause); /* make sure we have opfuncid */
+ return get_index_clause_from_support(root,
+ rinfo,
+ clause->opfuncid,
+ 1, /* indexarg on right */
+ indexcol,
+ index);
+ }
+
+ return NULL;
+}
+
+/*
+ * match_funcclause_to_indexcol()
+ * Handles the FuncExpr case for match_clause_to_indexcol(),
+ * which see for comments.
+ */
+static IndexClause *
+match_funcclause_to_indexcol(PlannerInfo *root,
+ RestrictInfo *rinfo,
+ int indexcol,
+ IndexOptInfo *index)
+{
+ FuncExpr *clause = (FuncExpr *) rinfo->clause;
+ int indexarg;
+ ListCell *lc;
+
+ /*
+ * We have no built-in intelligence about function clauses, but if there's
+ * a planner support function, it might be able to do something. But, to
+ * cut down on wasted planning cycles, only call the support function if
+ * at least one argument matches the target index column.
+ *
+ * Note that we don't insist on the other arguments being pseudoconstants;
+ * the support function has to check that. This is to allow cases where
+ * only some of the other arguments need to be included in the indexqual.
+ */
+ indexarg = 0;
+ foreach(lc, clause->args)
+ {
+ Node *op = (Node *) lfirst(lc);
+
+ if (match_index_to_operand(op, indexcol, index))
+ {
+ return get_index_clause_from_support(root,
+ rinfo,
+ clause->funcid,
+ indexarg,
+ indexcol,
+ index);
+ }
+
+ indexarg++;
+ }
+
+ return NULL;
+}
+
+/*
+ * get_index_clause_from_support()
+ * If the function has a planner support function, try to construct
+ * an IndexClause using indexquals created by the support function.
+ */
+static IndexClause *
+get_index_clause_from_support(PlannerInfo *root,
+ RestrictInfo *rinfo,
+ Oid funcid,
+ int indexarg,
+ int indexcol,
+ IndexOptInfo *index)
+{
+ Oid prosupport = get_func_support(funcid);
+ SupportRequestIndexCondition req;
+ List *sresult;
+
+ if (!OidIsValid(prosupport))
+ return NULL;
+
+ req.type = T_SupportRequestIndexCondition;
+ req.root = root;
+ req.funcid = funcid;
+ req.node = (Node *) rinfo->clause;
+ req.indexarg = indexarg;
+ req.index = index;
+ req.indexcol = indexcol;
+ req.opfamily = index->opfamily[indexcol];
+ req.indexcollation = index->indexcollations[indexcol];
+
+ req.lossy = true; /* default assumption */
+
+ sresult = (List *)
+ DatumGetPointer(OidFunctionCall1(prosupport,
+ PointerGetDatum(&req)));
+
+ if (sresult != NIL)
+ {
+ IndexClause *iclause = makeNode(IndexClause);
+ List *indexquals = NIL;
+ ListCell *lc;
+
+ /*
+ * The support function API says it should just give back bare
+ * clauses, so here we must wrap each one in a RestrictInfo.
+ */
+ foreach(lc, sresult)
+ {
+ Expr *clause = (Expr *) lfirst(lc);
+
+ indexquals = lappend(indexquals,
+ make_simple_restrictinfo(root, clause));
+ }
+
+ iclause->rinfo = rinfo;
+ iclause->indexquals = indexquals;
+ iclause->lossy = req.lossy;
+ iclause->indexcol = indexcol;
+ iclause->indexcols = NIL;
+
+ return iclause;
+ }
+
+ return NULL;
+}
+
+/*
+ * match_saopclause_to_indexcol()
+ * Handles the ScalarArrayOpExpr case for match_clause_to_indexcol(),
+ * which see for comments.
+ */
+static IndexClause *
+match_saopclause_to_indexcol(PlannerInfo *root,
+ RestrictInfo *rinfo,
+ int indexcol,
+ IndexOptInfo *index)
+{
+ ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) rinfo->clause;
+ Node *leftop,
+ *rightop;
+ Relids right_relids;
+ Oid expr_op;
+ Oid expr_coll;
+ Index index_relid;
+ Oid opfamily;
+ Oid idxcollation;
+
+ /* We only accept ANY clauses, not ALL */
+ if (!saop->useOr)
+ return NULL;
+ leftop = (Node *) linitial(saop->args);
+ rightop = (Node *) lsecond(saop->args);
+ right_relids = pull_varnos(root, rightop);
+ expr_op = saop->opno;
+ expr_coll = saop->inputcollid;
+
+ index_relid = index->rel->relid;
+ opfamily = index->opfamily[indexcol];
+ idxcollation = index->indexcollations[indexcol];
+
+ /*
+ * We must have indexkey on the left and a pseudo-constant array argument.
+ */
+ if (match_index_to_operand(leftop, indexcol, index) &&
+ !bms_is_member(index_relid, right_relids) &&
+ !contain_volatile_functions(rightop))
+ {
+ if (IndexCollMatchesExprColl(idxcollation, expr_coll) &&
+ op_in_opfamily(expr_op, opfamily))
+ {
+ IndexClause *iclause = makeNode(IndexClause);
+
+ iclause->rinfo = rinfo;
+ iclause->indexquals = list_make1(rinfo);
+ iclause->lossy = false;
+ iclause->indexcol = indexcol;
+ iclause->indexcols = NIL;
+ return iclause;
+ }
+
+ /*
+ * We do not currently ask support functions about ScalarArrayOpExprs,
+ * though in principle we could.
+ */
+ }
+
+ return NULL;
+}
+
+/*
+ * match_rowcompare_to_indexcol()
+ * Handles the RowCompareExpr case for match_clause_to_indexcol(),
+ * which see for comments.
+ *
+ * In this routine we check whether the first column of the row comparison
+ * matches the target index column. This is sufficient to guarantee that some
+ * index condition can be constructed from the RowCompareExpr --- the rest
+ * is handled by expand_indexqual_rowcompare().
+ */
+static IndexClause *
+match_rowcompare_to_indexcol(PlannerInfo *root,
+ RestrictInfo *rinfo,
+ int indexcol,
+ IndexOptInfo *index)
+{
+ RowCompareExpr *clause = (RowCompareExpr *) rinfo->clause;
+ Index index_relid;
+ Oid opfamily;
+ Oid idxcollation;
+ Node *leftop,
+ *rightop;
+ bool var_on_left;
+ Oid expr_op;
+ Oid expr_coll;
+
+ /* Forget it if we're not dealing with a btree index */
+ if (index->relam != BTREE_AM_OID)
+ return NULL;
+
+ index_relid = index->rel->relid;
+ opfamily = index->opfamily[indexcol];
+ idxcollation = index->indexcollations[indexcol];
+
+ /*
+ * We could do the matching on the basis of insisting that the opfamily
+ * shown in the RowCompareExpr be the same as the index column's opfamily,
+ * but that could fail in the presence of reverse-sort opfamilies: it'd be
+ * a matter of chance whether RowCompareExpr had picked the forward or
+ * reverse-sort family. So look only at the operator, and match if it is
+ * a member of the index's opfamily (after commutation, if the indexkey is
+ * on the right). We'll worry later about whether any additional
+ * operators are matchable to the index.
+ */
+ leftop = (Node *) linitial(clause->largs);
+ rightop = (Node *) linitial(clause->rargs);
+ expr_op = linitial_oid(clause->opnos);
+ expr_coll = linitial_oid(clause->inputcollids);
+
+ /* Collations must match, if relevant */
+ if (!IndexCollMatchesExprColl(idxcollation, expr_coll))
+ return NULL;
+
+ /*
+ * These syntactic tests are the same as in match_opclause_to_indexcol()
+ */
+ if (match_index_to_operand(leftop, indexcol, index) &&
+ !bms_is_member(index_relid, pull_varnos(root, rightop)) &&
+ !contain_volatile_functions(rightop))
+ {
+ /* OK, indexkey is on left */
+ var_on_left = true;
+ }
+ else if (match_index_to_operand(rightop, indexcol, index) &&
+ !bms_is_member(index_relid, pull_varnos(root, leftop)) &&
+ !contain_volatile_functions(leftop))
+ {
+ /* indexkey is on right, so commute the operator */
+ expr_op = get_commutator(expr_op);
+ if (expr_op == InvalidOid)
+ return NULL;
+ var_on_left = false;
+ }
+ else
+ return NULL;
+
+ /* We're good if the operator is the right type of opfamily member */
+ switch (get_op_opfamily_strategy(expr_op, opfamily))
+ {
+ case BTLessStrategyNumber:
+ case BTLessEqualStrategyNumber:
+ case BTGreaterEqualStrategyNumber:
+ case BTGreaterStrategyNumber:
+ return expand_indexqual_rowcompare(root,
+ rinfo,
+ indexcol,
+ index,
+ expr_op,
+ var_on_left);
+ }
+
+ return NULL;
+}
+
+/*
+ * expand_indexqual_rowcompare --- expand a single indexqual condition
+ * that is a RowCompareExpr
+ *
+ * It's already known that the first column of the row comparison matches
+ * the specified column of the index. We can use additional columns of the
+ * row comparison as index qualifications, so long as they match the index
+ * in the "same direction", ie, the indexkeys are all on the same side of the
+ * clause and the operators are all the same-type members of the opfamilies.
+ *
+ * If all the columns of the RowCompareExpr match in this way, we just use it
+ * as-is, except for possibly commuting it to put the indexkeys on the left.
+ *
+ * Otherwise, we build a shortened RowCompareExpr (if more than one
+ * column matches) or a simple OpExpr (if the first-column match is all
+ * there is). In these cases the modified clause is always "<=" or ">="
+ * even when the original was "<" or ">" --- this is necessary to match all
+ * the rows that could match the original. (We are building a lossy version
+ * of the row comparison when we do this, so we set lossy = true.)
+ *
+ * Note: this is really just the last half of match_rowcompare_to_indexcol,
+ * but we split it out for comprehensibility.
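+ *
+ * For example, with an index on (a, b) and the clause
+ * ROW(a, b, c) < ROW(1, 2, 3) where c is not an index column, we produce the
+ * lossy derived qual ROW(a, b) <= ROW(1, 2); if only the first column had
+ * matched, the derived qual would be simply "a <= 1".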
+ */
+static IndexClause *
+expand_indexqual_rowcompare(PlannerInfo *root,
+ RestrictInfo *rinfo,
+ int indexcol,
+ IndexOptInfo *index,
+ Oid expr_op,
+ bool var_on_left)
+{
+ IndexClause *iclause = makeNode(IndexClause);
+ RowCompareExpr *clause = (RowCompareExpr *) rinfo->clause;
+ int op_strategy;
+ Oid op_lefttype;
+ Oid op_righttype;
+ int matching_cols;
+ List *expr_ops;
+ List *opfamilies;
+ List *lefttypes;
+ List *righttypes;
+ List *new_ops;
+ List *var_args;
+ List *non_var_args;
+
+ iclause->rinfo = rinfo;
+ iclause->indexcol = indexcol;
+
+ if (var_on_left)
+ {
+ var_args = clause->largs;
+ non_var_args = clause->rargs;
+ }
+ else
+ {
+ var_args = clause->rargs;
+ non_var_args = clause->largs;
+ }
+
+ get_op_opfamily_properties(expr_op, index->opfamily[indexcol], false,
+ &op_strategy,
+ &op_lefttype,
+ &op_righttype);
+
+ /* Initialize returned list of which index columns are used */
+ iclause->indexcols = list_make1_int(indexcol);
+
+ /* Build lists of ops, opfamilies and operator datatypes in case needed */
+ expr_ops = list_make1_oid(expr_op);
+ opfamilies = list_make1_oid(index->opfamily[indexcol]);
+ lefttypes = list_make1_oid(op_lefttype);
+ righttypes = list_make1_oid(op_righttype);
+
+ /*
+ * See how many of the remaining columns match some index column in the
+ * same way. As in match_clause_to_indexcol(), the "other" side of any
+ * potential index condition is OK as long as it doesn't use Vars from the
+ * indexed relation.
+ */
+ matching_cols = 1;
+
+ while (matching_cols < list_length(var_args))
+ {
+ Node *varop = (Node *) list_nth(var_args, matching_cols);
+ Node *constop = (Node *) list_nth(non_var_args, matching_cols);
+ int i;
+
+ expr_op = list_nth_oid(clause->opnos, matching_cols);
+ if (!var_on_left)
+ {
+ /* indexkey is on right, so commute the operator */
+ expr_op = get_commutator(expr_op);
+ if (expr_op == InvalidOid)
+ break; /* operator is not usable */
+ }
+ if (bms_is_member(index->rel->relid, pull_varnos(root, constop)))
+ break; /* no good, Var on wrong side */
+ if (contain_volatile_functions(constop))
+ break; /* no good, volatile comparison value */
+
+ /*
+ * The Var side can match any key column of the index.
+ */
+ for (i = 0; i < index->nkeycolumns; i++)
+ {
+ if (match_index_to_operand(varop, i, index) &&
+ get_op_opfamily_strategy(expr_op,
+ index->opfamily[i]) == op_strategy &&
+ IndexCollMatchesExprColl(index->indexcollations[i],
+ list_nth_oid(clause->inputcollids,
+ matching_cols)))
+ break;
+ }
+ if (i >= index->nkeycolumns)
+ break; /* no match found */
+
+ /* Add column number to returned list */
+ iclause->indexcols = lappend_int(iclause->indexcols, i);
+
+ /* Add operator info to lists */
+ get_op_opfamily_properties(expr_op, index->opfamily[i], false,
+ &op_strategy,
+ &op_lefttype,
+ &op_righttype);
+ expr_ops = lappend_oid(expr_ops, expr_op);
+ opfamilies = lappend_oid(opfamilies, index->opfamily[i]);
+ lefttypes = lappend_oid(lefttypes, op_lefttype);
+ righttypes = lappend_oid(righttypes, op_righttype);
+
+ /* This column matches, keep scanning */
+ matching_cols++;
+ }
+
+ /* Result is non-lossy if all columns are usable as index quals */
+ iclause->lossy = (matching_cols != list_length(clause->opnos));
+
+ /*
+ * We can use rinfo->clause as-is if we have var on left and it's all
+ * usable as index quals.
+ */
+ if (var_on_left && !iclause->lossy)
+ iclause->indexquals = list_make1(rinfo);
+ else
+ {
+ /*
+ * We have to generate a modified rowcompare (possibly just one
+ * OpExpr). The painful part of this is changing < to <= or > to >=,
+ * so deal with that first.
+ */
+ if (!iclause->lossy)
+ {
+ /* very easy, just use the commuted operators */
+ new_ops = expr_ops;
+ }
+ else if (op_strategy == BTLessEqualStrategyNumber ||
+ op_strategy == BTGreaterEqualStrategyNumber)
+ {
+ /* easy, just use the same (possibly commuted) operators */
+ new_ops = list_truncate(expr_ops, matching_cols);
+ }
+ else
+ {
+ ListCell *opfamilies_cell;
+ ListCell *lefttypes_cell;
+ ListCell *righttypes_cell;
+
+ if (op_strategy == BTLessStrategyNumber)
+ op_strategy = BTLessEqualStrategyNumber;
+ else if (op_strategy == BTGreaterStrategyNumber)
+ op_strategy = BTGreaterEqualStrategyNumber;
+ else
+ elog(ERROR, "unexpected strategy number %d", op_strategy);
+ new_ops = NIL;
+ forthree(opfamilies_cell, opfamilies,
+ lefttypes_cell, lefttypes,
+ righttypes_cell, righttypes)
+ {
+ Oid opfam = lfirst_oid(opfamilies_cell);
+ Oid lefttype = lfirst_oid(lefttypes_cell);
+ Oid righttype = lfirst_oid(righttypes_cell);
+
+ expr_op = get_opfamily_member(opfam, lefttype, righttype,
+ op_strategy);
+ if (!OidIsValid(expr_op)) /* should not happen */
+ elog(ERROR, "missing operator %d(%u,%u) in opfamily %u",
+ op_strategy, lefttype, righttype, opfam);
+ new_ops = lappend_oid(new_ops, expr_op);
+ }
+ }
+
+ /* If we have more than one matching col, create a subset rowcompare */
+ if (matching_cols > 1)
+ {
+ RowCompareExpr *rc = makeNode(RowCompareExpr);
+
+ rc->rctype = (RowCompareType) op_strategy;
+ rc->opnos = new_ops;
+ rc->opfamilies = list_truncate(list_copy(clause->opfamilies),
+ matching_cols);
+ rc->inputcollids = list_truncate(list_copy(clause->inputcollids),
+ matching_cols);
+ rc->largs = list_truncate(copyObject(var_args),
+ matching_cols);
+ rc->rargs = list_truncate(copyObject(non_var_args),
+ matching_cols);
+ iclause->indexquals = list_make1(make_simple_restrictinfo(root,
+ (Expr *) rc));
+ }
+ else
+ {
+ Expr *op;
+
+ /* We don't report an index column list in this case */
+ iclause->indexcols = NIL;
+
+ op = make_opclause(linitial_oid(new_ops), BOOLOID, false,
+ copyObject(linitial(var_args)),
+ copyObject(linitial(non_var_args)),
+ InvalidOid,
+ linitial_oid(clause->inputcollids));
+ iclause->indexquals = list_make1(make_simple_restrictinfo(root, op));
+ }
+ }
+
+ return iclause;
+}
+
+
+/****************************************************************************
+ * ---- ROUTINES TO CHECK ORDERING OPERATORS ----
+ ****************************************************************************/
+
+/*
+ * match_pathkeys_to_index
+ * Test whether an index can produce output ordered according to the
+ * given pathkeys using "ordering operators".
+ *
+ * If it can, return a list of suitable ORDER BY expressions, each of the form
+ * "indexedcol operator pseudoconstant", along with an integer list of the
+ * index column numbers (zero based) that each clause would be used with.
+ * NIL lists are returned if the ordering is not achievable this way.
+ *
+ * On success, the result list is ordered by pathkeys, and in fact is
+ * one-to-one with the requested pathkeys.
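+ *
+ * A typical use is a GiST nearest-neighbor ordering such as
+ * "ORDER BY pt <-> '(0,0)'", where pt is an indexed column and "<->" is an
+ * ordering operator of the index column's opfamily.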
+ */
+static void
+match_pathkeys_to_index(IndexOptInfo *index, List *pathkeys,
+ List **orderby_clauses_p,
+ List **clause_columns_p)
+{
+ List *orderby_clauses = NIL;
+ List *clause_columns = NIL;
+ ListCell *lc1;
+
+ *orderby_clauses_p = NIL; /* set default results */
+ *clause_columns_p = NIL;
+
+ /* Only indexes with the amcanorderbyop property are interesting here */
+ if (!index->amcanorderbyop)
+ return;
+
+ foreach(lc1, pathkeys)
+ {
+ PathKey *pathkey = (PathKey *) lfirst(lc1);
+ bool found = false;
+ ListCell *lc2;
+
+ /*
+ * Note: for any failure to match, we just return NIL immediately.
+ * There is no value in matching just some of the pathkeys.
+ */
+
+ /* Pathkey must request default sort order for the target opfamily */
+ if (pathkey->pk_strategy != BTLessStrategyNumber ||
+ pathkey->pk_nulls_first)
+ return;
+
+ /* If eclass is volatile, no hope of using an indexscan */
+ if (pathkey->pk_eclass->ec_has_volatile)
+ return;
+
+ /*
+ * Try to match eclass member expression(s) to index. Note that child
+ * EC members are considered, but only when they belong to the target
+ * relation. (Unlike regular members, the same expression could be a
+ * child member of more than one EC. Therefore, the same index could
+ * be considered to match more than one pathkey list, which is OK
+ * here. See also get_eclass_for_sort_expr.)
+ */
+ foreach(lc2, pathkey->pk_eclass->ec_members)
+ {
+ EquivalenceMember *member = (EquivalenceMember *) lfirst(lc2);
+ int indexcol;
+
+ /* No possibility of match if it references other relations */
+ if (!bms_equal(member->em_relids, index->rel->relids))
+ continue;
+
+ /*
+ * We allow any column of the index to match each pathkey; they
+ * don't have to match left-to-right as you might expect. This is
+ * correct for GiST, and it doesn't matter for SP-GiST because
+ * that doesn't handle multiple columns anyway, and no other
+ * existing AMs support amcanorderbyop. We might need different
+ * logic in future for other implementations.
+ */
+ for (indexcol = 0; indexcol < index->nkeycolumns; indexcol++)
+ {
+ Expr *expr;
+
+ expr = match_clause_to_ordering_op(index,
+ indexcol,
+ member->em_expr,
+ pathkey->pk_opfamily);
+ if (expr)
+ {
+ orderby_clauses = lappend(orderby_clauses, expr);
+ clause_columns = lappend_int(clause_columns, indexcol);
+ found = true;
+ break;
+ }
+ }
+
+ if (found) /* don't want to look at remaining members */
+ break;
+ }
+
+ if (!found) /* fail if no match for this pathkey */
+ return;
+ }
+
+ *orderby_clauses_p = orderby_clauses; /* success! */
+ *clause_columns_p = clause_columns;
+}
+
+/*
+ * match_clause_to_ordering_op
+ * Determines whether an ordering operator expression matches an
+ * index column.
+ *
+ * This is similar to, but simpler than, match_clause_to_indexcol.
+ * We only care about simple OpExpr cases. The input is a bare
+ * expression that is being ordered by, which must be of the form
+ * (indexkey op const) or (const op indexkey) where op is an ordering
+ * operator for the column's opfamily.
+ *
+ * 'index' is the index of interest.
+ * 'indexcol' is a column number of 'index' (counting from 0).
+ * 'clause' is the ordering expression to be tested.
+ * 'pk_opfamily' is the btree opfamily describing the required sort order.
+ *
+ * Note that we currently do not consider the collation of the ordering
+ * operator's result. In practical cases the result type will be numeric
+ * and thus have no collation, and it's not very clear what to match to
+ * if it did have a collation. The index's collation should match the
+ * ordering operator's input collation, not its result.
+ *
+ * If successful, return 'clause' as-is if the indexkey is on the left,
+ * otherwise a commuted copy of 'clause'. If no match, return NULL.
+ */
+static Expr *
+match_clause_to_ordering_op(IndexOptInfo *index,
+ int indexcol,
+ Expr *clause,
+ Oid pk_opfamily)
+{
+ Oid opfamily;
+ Oid idxcollation;
+ Node *leftop,
+ *rightop;
+ Oid expr_op;
+ Oid expr_coll;
+ Oid sortfamily;
+ bool commuted;
+
+ Assert(indexcol < index->nkeycolumns);
+
+ opfamily = index->opfamily[indexcol];
+ idxcollation = index->indexcollations[indexcol];
+
+ /*
+ * Clause must be a binary opclause.
+ */
+ if (!is_opclause(clause))
+ return NULL;
+ leftop = get_leftop(clause);
+ rightop = get_rightop(clause);
+ if (!leftop || !rightop)
+ return NULL;
+ expr_op = ((OpExpr *) clause)->opno;
+ expr_coll = ((OpExpr *) clause)->inputcollid;
+
+ /*
+ * We can forget the whole thing right away if wrong collation.
+ */
+ if (!IndexCollMatchesExprColl(idxcollation, expr_coll))
+ return NULL;
+
+ /*
+ * Check for clauses of the form: (indexkey operator constant) or
+ * (constant operator indexkey).
+ */
+ if (match_index_to_operand(leftop, indexcol, index) &&
+ !contain_var_clause(rightop) &&
+ !contain_volatile_functions(rightop))
+ {
+ commuted = false;
+ }
+ else if (match_index_to_operand(rightop, indexcol, index) &&
+ !contain_var_clause(leftop) &&
+ !contain_volatile_functions(leftop))
+ {
+ /* Might match, but we need a commuted operator */
+ expr_op = get_commutator(expr_op);
+ if (expr_op == InvalidOid)
+ return NULL;
+ commuted = true;
+ }
+ else
+ return NULL;
+
+ /*
+ * Is the (commuted) operator an ordering operator for the opfamily? And
+ * if so, does it yield the right sorting semantics?
+ */
+ sortfamily = get_op_opfamily_sortfamily(expr_op, opfamily);
+ if (sortfamily != pk_opfamily)
+ return NULL;
+
+ /* We have a match. Return clause or a commuted version thereof. */
+ if (commuted)
+ {
+ OpExpr *newclause = makeNode(OpExpr);
+
+ /* flat-copy all the fields of clause */
+ memcpy(newclause, clause, sizeof(OpExpr));
+
+ /* commute it */
+ newclause->opno = expr_op;
+ newclause->opfuncid = InvalidOid;
+ newclause->args = list_make2(rightop, leftop);
+
+ clause = (Expr *) newclause;
+ }
+
+ return clause;
+}
+
+
+/****************************************************************************
+ * ---- ROUTINES TO DO PARTIAL INDEX PREDICATE TESTS ----
+ ****************************************************************************/
+
+/*
+ * check_index_predicates
+ * Set the predicate-derived IndexOptInfo fields for each index
+ * of the specified relation.
+ *
+ * predOK is set true if the index is partial and its predicate is satisfied
+ * for this query, ie the query's WHERE clauses imply the predicate.
+ *
+ * indrestrictinfo is set to the relation's baserestrictinfo list less any
+ * conditions that are implied by the index's predicate. (Obviously, for a
+ * non-partial index, this is the same as baserestrictinfo.) Such conditions
+ * can be dropped from the plan when using the index, in certain cases.
+ *
+ * At one time it was possible for this to get re-run after adding more
+ * restrictions to the rel, thus possibly letting us prove more indexes OK.
+ * That doesn't happen any more (at least not in the core code's usage),
+ * but this code still supports it in case extensions want to mess with the
+ * baserestrictinfo list. We assume that adding more restrictions can't make
+ * an index not predOK. We must recompute indrestrictinfo each time, though,
+ * to make sure any newly-added restrictions get into it if needed.
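+ *
+ * For example, given a partial index with predicate "WHERE deleted = false"
+ * and a query WHERE clause containing "deleted = false AND x = 1", the index
+ * is marked predOK, and (except for target relations, see below)
+ * "deleted = false" is omitted from its indrestrictinfo since the index
+ * itself guarantees that condition.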
+ */
+void
+check_index_predicates(PlannerInfo *root, RelOptInfo *rel)
+{
+ List *clauselist;
+ bool have_partial;
+ bool is_target_rel;
+ Relids otherrels;
+ ListCell *lc;
+
+ /* Indexes are available only on base or "other" member relations. */
+ Assert(IS_SIMPLE_REL(rel));
+
+ /*
+ * Initialize the indrestrictinfo lists to be identical to
+ * baserestrictinfo, and check whether there are any partial indexes. If
+ * not, this is all we need to do.
+ */
+ have_partial = false;
+ foreach(lc, rel->indexlist)
+ {
+ IndexOptInfo *index = (IndexOptInfo *) lfirst(lc);
+
+ index->indrestrictinfo = rel->baserestrictinfo;
+ if (index->indpred)
+ have_partial = true;
+ }
+ if (!have_partial)
+ return;
+
+ /*
+ * Construct a list of clauses that we can assume true for the purpose of
+ * proving the index(es) usable. Restriction clauses for the rel are
+ * always usable, and so are any join clauses that are "movable to" this
+ * rel. Also, we can consider any EC-derivable join clauses (which must
+ * be "movable to" this rel, by definition).
+ */
+ clauselist = list_copy(rel->baserestrictinfo);
+
+ /* Scan the rel's join clauses */
+ foreach(lc, rel->joininfo)
+ {
+ RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
+
+ /* Check if clause can be moved to this rel */
+ if (!join_clause_is_movable_to(rinfo, rel))
+ continue;
+
+ clauselist = lappend(clauselist, rinfo);
+ }
+
+ /*
+ * Add on any equivalence-derivable join clauses. Computing the correct
+ * relid sets for generate_join_implied_equalities is slightly tricky
+ * because the rel could be a child rel rather than a true baserel, and in
+ * that case we must remove its parents' relid(s) from all_baserels.
+ */
+ if (rel->reloptkind == RELOPT_OTHER_MEMBER_REL)
+ otherrels = bms_difference(root->all_baserels,
+ find_childrel_parents(root, rel));
+ else
+ otherrels = bms_difference(root->all_baserels, rel->relids);
+
+ if (!bms_is_empty(otherrels))
+ clauselist =
+ list_concat(clauselist,
+ generate_join_implied_equalities(root,
+ bms_union(rel->relids,
+ otherrels),
+ otherrels,
+ rel));
+
+ /*
+ * Normally we remove quals that are implied by a partial index's
+ * predicate from indrestrictinfo, indicating that they need not be
+ * checked explicitly by an indexscan plan using this index. However, if
+ * the rel is a target relation of UPDATE/DELETE/MERGE/SELECT FOR UPDATE,
+ * we cannot remove such quals from the plan, because they need to be in
+ * the plan so that they will be properly rechecked by EvalPlanQual
+ * testing. Some day we might want to remove such quals from the main
+ * plan anyway and pass them through to EvalPlanQual via a side channel;
+ * but for now, we just don't remove implied quals at all for target
+ * relations.
+ */
+ is_target_rel = (bms_is_member(rel->relid, root->all_result_relids) ||
+ get_plan_rowmark(root->rowMarks, rel->relid) != NULL);
+
+ /*
+ * Now try to prove each index predicate true, and compute the
+ * indrestrictinfo lists for partial indexes. Note that we compute the
+ * indrestrictinfo list even for non-predOK indexes; this might seem
+ * wasteful, but we may be able to use such indexes in OR clauses, cf
+ * generate_bitmap_or_paths().
+ */
+ foreach(lc, rel->indexlist)
+ {
+ IndexOptInfo *index = (IndexOptInfo *) lfirst(lc);
+ ListCell *lcr;
+
+ if (index->indpred == NIL)
+ continue; /* ignore non-partial indexes here */
+
+ if (!index->predOK) /* don't repeat work if already proven OK */
+ index->predOK = predicate_implied_by(index->indpred, clauselist,
+ false);
+
+ /* If rel is an update target, leave indrestrictinfo as set above */
+ if (is_target_rel)
+ continue;
+
+ /* Else compute indrestrictinfo as the non-implied quals */
+ index->indrestrictinfo = NIL;
+ foreach(lcr, rel->baserestrictinfo)
+ {
+ RestrictInfo *rinfo = (RestrictInfo *) lfirst(lcr);
+
+ /* predicate_implied_by() assumes first arg is immutable */
+ if (contain_mutable_functions((Node *) rinfo->clause) ||
+ !predicate_implied_by(list_make1(rinfo->clause),
+ index->indpred, false))
+ index->indrestrictinfo = lappend(index->indrestrictinfo, rinfo);
+ }
+ }
+}
+
+/****************************************************************************
+ * ---- ROUTINES TO CHECK EXTERNALLY-VISIBLE CONDITIONS ----
+ ****************************************************************************/
+
+/*
+ * ec_member_matches_indexcol
+ * Test whether an EquivalenceClass member matches an index column.
+ *
+ * This is a callback for use by generate_implied_equalities_for_column.
+ */
+static bool
+ec_member_matches_indexcol(PlannerInfo *root, RelOptInfo *rel,
+ EquivalenceClass *ec, EquivalenceMember *em,
+ void *arg)
+{
+ IndexOptInfo *index = ((ec_member_matches_arg *) arg)->index;
+ int indexcol = ((ec_member_matches_arg *) arg)->indexcol;
+ Oid curFamily;
+ Oid curCollation;
+
+ Assert(indexcol < index->nkeycolumns);
+
+ curFamily = index->opfamily[indexcol];
+ curCollation = index->indexcollations[indexcol];
+
+ /*
+ * If it's a btree index, we can reject it if its opfamily isn't
+ * compatible with the EC, since no clause generated from the EC could be
+ * used with the index. For non-btree indexes, we can't easily tell
+ * whether clauses generated from the EC could be used with the index, so
+ * don't check the opfamily. This might mean we return "true" for a
+ * useless EC, so we have to recheck the results of
+ * generate_implied_equalities_for_column; see
+ * match_eclass_clauses_to_index.
+ */
+ if (index->relam == BTREE_AM_OID &&
+ !list_member_oid(ec->ec_opfamilies, curFamily))
+ return false;
+
+ /* We insist on collation match for all index types, though */
+ if (!IndexCollMatchesExprColl(curCollation, ec->ec_collation))
+ return false;
+
+ return match_index_to_operand((Node *) em->em_expr, indexcol, index);
+}
+
+/*
+ * relation_has_unique_index_for
+ * Determine whether the relation provably has at most one row satisfying
+ * a set of equality conditions, because the conditions constrain all
+ * columns of some unique index.
+ *
+ * The conditions can be represented in either or both of two ways:
+ * 1. A list of RestrictInfo nodes, where the caller has already determined
+ * that each condition is a mergejoinable equality with an expression in
+ * this relation on one side, and an expression not involving this relation
+ * on the other. The transient outer_is_left flag is used to identify which
+ * side we should look at: left side if outer_is_left is false, right side
+ * if it is true.
+ * 2. A list of expressions in this relation, and a corresponding list of
+ * equality operators. The caller must have already checked that the operators
+ * represent equality. (Note: the operators could be cross-type; the
+ * expressions should correspond to their RHS inputs.)
+ *
+ * The caller need only supply equality conditions arising from joins;
+ * this routine automatically adds in any usable baserestrictinfo clauses.
+ * (Note that the passed-in restrictlist will be destructively modified!)
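+ *
+ * For example, with a unique index on (a, b), a join condition "a = t2.x"
+ * plus the baserestrictinfo clause "b = 5" constrain every column of the
+ * index, so the rel can return at most one row for any given value of t2.x.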
+ */
+bool
+relation_has_unique_index_for(PlannerInfo *root, RelOptInfo *rel,
+ List *restrictlist,
+ List *exprlist, List *oprlist)
+{
+ ListCell *ic;
+
+ Assert(list_length(exprlist) == list_length(oprlist));
+
+ /* Short-circuit if no indexes... */
+ if (rel->indexlist == NIL)
+ return false;
+
+ /*
+ * Examine the rel's restriction clauses for usable var = const clauses
+ * that we can add to the restrictlist.
+ */
+ foreach(ic, rel->baserestrictinfo)
+ {
+ RestrictInfo *restrictinfo = (RestrictInfo *) lfirst(ic);
+
+ /*
+ * Note: can_join won't be set for a restriction clause, but
+ * mergeopfamilies will be if it has a mergejoinable operator and
+ * doesn't contain volatile functions.
+ */
+ if (restrictinfo->mergeopfamilies == NIL)
+ continue; /* not mergejoinable */
+
+ /*
+ * The clause certainly doesn't refer to anything but the given rel.
+ * If either side is pseudoconstant then we can use it.
+ */
+ if (bms_is_empty(restrictinfo->left_relids))
+ {
+ /* righthand side is inner */
+ restrictinfo->outer_is_left = true;
+ }
+ else if (bms_is_empty(restrictinfo->right_relids))
+ {
+ /* lefthand side is inner */
+ restrictinfo->outer_is_left = false;
+ }
+ else
+ continue;
+
+ /* OK, add to list */
+ restrictlist = lappend(restrictlist, restrictinfo);
+ }
+
+ /* Short-circuit the easy case */
+ if (restrictlist == NIL && exprlist == NIL)
+ return false;
+
+ /* Examine each index of the relation ... */
+ foreach(ic, rel->indexlist)
+ {
+ IndexOptInfo *ind = (IndexOptInfo *) lfirst(ic);
+ int c;
+
+ /*
+ * If the index is not unique, or not immediately enforced, or if it's
+ * a partial index, it's useless here. We're unable to make use of
+ * predOK partial unique indexes due to the fact that
+ * check_index_predicates() also makes use of join predicates to
+ * determine if the partial index is usable. Here we need proofs that
+ * hold true before any joins are evaluated.
+ */
+ if (!ind->unique || !ind->immediate || ind->indpred != NIL)
+ continue;
+
+ /*
+ * Try to find each index column in the lists of conditions. This is
+ * O(N^2) or worse, but we expect all the lists to be short.
+ */
+ for (c = 0; c < ind->nkeycolumns; c++)
+ {
+ bool matched = false;
+ ListCell *lc;
+ ListCell *lc2;
+
+ foreach(lc, restrictlist)
+ {
+ RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
+ Node *rexpr;
+
+ /*
+ * The condition's equality operator must be a member of the
+ * index opfamily, else it is not asserting the right kind of
+ * equality behavior for this index. We check this first
+ * since it's probably cheaper than match_index_to_operand().
+ */
+ if (!list_member_oid(rinfo->mergeopfamilies, ind->opfamily[c]))
+ continue;
+
+ /*
+ * XXX at some point we may need to check collations here too.
+ * For the moment we assume all collations reduce to the same
+ * notion of equality.
+ */
+
+ /* OK, see if the condition operand matches the index key */
+ if (rinfo->outer_is_left)
+ rexpr = get_rightop(rinfo->clause);
+ else
+ rexpr = get_leftop(rinfo->clause);
+
+ if (match_index_to_operand(rexpr, c, ind))
+ {
+ matched = true; /* column is unique */
+ break;
+ }
+ }
+
+ if (matched)
+ continue;
+
+ forboth(lc, exprlist, lc2, oprlist)
+ {
+ Node *expr = (Node *) lfirst(lc);
+ Oid opr = lfirst_oid(lc2);
+
+ /* See if the expression matches the index key */
+ if (!match_index_to_operand(expr, c, ind))
+ continue;
+
+ /*
+ * The equality operator must be a member of the index
+ * opfamily, else it is not asserting the right kind of
+ * equality behavior for this index. We assume the caller
+ * determined it is an equality operator, so we don't need to
+ * check any more tightly than this.
+ */
+ if (!op_in_opfamily(opr, ind->opfamily[c]))
+ continue;
+
+ /*
+ * XXX at some point we may need to check collations here too.
+ * For the moment we assume all collations reduce to the same
+ * notion of equality.
+ */
+
+ matched = true; /* column is unique */
+ break;
+ }
+
+ if (!matched)
+ break; /* no match; this index doesn't help us */
+ }
+
+ /* Matched all key columns of this index? */
+ if (c == ind->nkeycolumns)
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * indexcol_is_bool_constant_for_query
+ *
+ * If an index column is constrained to have a constant value by the query's
+ * WHERE conditions, then it's irrelevant for sort-order considerations.
+ * Usually that means we have a restriction clause WHERE indexcol = constant,
+ * which gets turned into an EquivalenceClass containing a constant, which
+ * is recognized as redundant by build_index_pathkeys(). But if the index
+ * column is a boolean variable (or expression), then we are not going to
+ * see WHERE indexcol = constant, because expression preprocessing will have
+ * simplified that to "WHERE indexcol" or "WHERE NOT indexcol". So we are not
+ * going to have a matching EquivalenceClass (unless the query also contains
+ * "ORDER BY indexcol"). To allow such cases to work the same as they would
+ * for non-boolean values, this function is provided to detect whether the
+ * specified index column matches a boolean restriction clause.
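+ *
+ * For example, with an index on (flag, ts) where flag is boolean, a query
+ * such as "WHERE flag ORDER BY ts" can still treat the index as sorted by ts,
+ * since flag is pinned to a constant by the restriction clause.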
+ */
+bool
+indexcol_is_bool_constant_for_query(PlannerInfo *root,
+ IndexOptInfo *index,
+ int indexcol)
+{
+ ListCell *lc;
+
+ /* If the index isn't boolean, we can't possibly get a match */
+ if (!IsBooleanOpfamily(index->opfamily[indexcol]))
+ return false;
+
+ /* Check each restriction clause for the index's rel */
+ foreach(lc, index->rel->baserestrictinfo)
+ {
+ RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
+
+ /*
+ * As in match_clause_to_indexcol, never match pseudoconstants to
+ * indexes. (It might be semantically okay to do so here, but the
+ * odds of getting a match are negligible, so don't waste the cycles.)
+ */
+ if (rinfo->pseudoconstant)
+ continue;
+
+ /* See if we can match the clause's expression to the index column */
+ if (match_boolean_index_clause(root, rinfo, indexcol, index))
+ return true;
+ }
+
+ return false;
+}
+
+
+/****************************************************************************
+ * ---- ROUTINES TO CHECK OPERANDS ----
+ ****************************************************************************/
+
+/*
+ * match_index_to_operand()
+ * Generalized test for a match between an index's key
+ * and the operand on one side of a restriction or join clause.
+ *
+ * operand: the nodetree to be compared to the index
+ * indexcol: the column number of the index (counting from 0)
+ * index: the index of interest
+ *
+ * Note that we aren't interested in collations here; the caller must check
+ * for a collation match, if it's dealing with an operator where that matters.
+ *
+ * This is exported for use in selfuncs.c.
+ */
+bool
+match_index_to_operand(Node *operand,
+ int indexcol,
+ IndexOptInfo *index)
+{
+ int indkey;
+
+ /*
+ * Ignore any RelabelType node above the operand. This is needed to be
+ * able to apply indexscanning in binary-compatible-operator cases. Note:
+ * we can assume there is at most one RelabelType node;
+ * eval_const_expressions() will have simplified if more than one.
+ */
+ if (operand && IsA(operand, RelabelType))
+ operand = (Node *) ((RelabelType *) operand)->arg;
+
+ indkey = index->indexkeys[indexcol];
+ if (indkey != 0)
+ {
+ /*
+ * Simple index column; operand must be a matching Var.
+ */
+ if (operand && IsA(operand, Var) &&
+ index->rel->relid == ((Var *) operand)->varno &&
+ indkey == ((Var *) operand)->varattno)
+ return true;
+ }
+ else
+ {
+ /*
+ * Index expression; find the correct expression. (This search could
+ * be avoided, at the cost of complicating all the callers of this
+ * routine; doesn't seem worth it.)
+ */
+ ListCell *indexpr_item;
+ int i;
+ Node *indexkey;
+
+ indexpr_item = list_head(index->indexprs);
+ for (i = 0; i < indexcol; i++)
+ {
+ if (index->indexkeys[i] == 0)
+ {
+ if (indexpr_item == NULL)
+ elog(ERROR, "wrong number of index expressions");
+ indexpr_item = lnext(index->indexprs, indexpr_item);
+ }
+ }
+ if (indexpr_item == NULL)
+ elog(ERROR, "wrong number of index expressions");
+ indexkey = (Node *) lfirst(indexpr_item);
+
+ /*
+ * Does it match the operand? Again, strip any relabeling.
+ */
+ if (indexkey && IsA(indexkey, RelabelType))
+ indexkey = (Node *) ((RelabelType *) indexkey)->arg;
+
+ if (equal(indexkey, operand))
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * is_pseudo_constant_for_index()
+ * Test whether the given expression can be used as an indexscan
+ * comparison value.
+ *
+ * An indexscan comparison value must not contain any volatile functions,
+ * and it can't contain any Vars of the index's own table. Vars of
+ * other tables are okay, though; in that case we'd be producing an
+ * indexqual usable in a parameterized indexscan. This is, therefore,
+ * a weaker condition than is_pseudo_constant_clause().
+ *
+ * This function is exported for use by planner support functions,
+ * which will have available the IndexOptInfo, but not any RestrictInfo
+ * infrastructure. It is making the same test made by functions above
+ * such as match_opclause_to_indexcol(), but those rely where possible
+ * on RestrictInfo information about variable membership.
+ *
+ * expr: the nodetree to be checked
+ * index: the index of interest
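+ *
+ * For example, "42", a Param such as "$1", or "other_tab.x" are acceptable
+ * comparison values (the last yielding a parameterized indexqual), whereas
+ * "random()" or any Var of the index's own table is not.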
+ */
+bool
+is_pseudo_constant_for_index(PlannerInfo *root, Node *expr, IndexOptInfo *index)
+{
+ /* pull_varnos is cheaper than volatility check, so do that first */
+ if (bms_is_member(index->rel->relid, pull_varnos(root, expr)))
+ return false; /* no good, contains Var of table */
+ if (contain_volatile_functions(expr))
+ return false; /* no good, volatile comparison value */
+ return true;
+}
diff --git a/src/backend/optimizer/path/joinpath.c b/src/backend/optimizer/path/joinpath.c
new file mode 100644
index 0000000..bad3dd1
--- /dev/null
+++ b/src/backend/optimizer/path/joinpath.c
@@ -0,0 +1,2367 @@
+/*-------------------------------------------------------------------------
+ *
+ * joinpath.c
+ * Routines to find all possible paths for processing a set of joins
+ *
+ * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ * src/backend/optimizer/path/joinpath.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include <math.h>
+
+#include "executor/executor.h"
+#include "foreign/fdwapi.h"
+#include "nodes/nodeFuncs.h"
+#include "optimizer/cost.h"
+#include "optimizer/optimizer.h"
+#include "optimizer/pathnode.h"
+#include "optimizer/paths.h"
+#include "optimizer/planmain.h"
+#include "optimizer/restrictinfo.h"
+#include "utils/typcache.h"
+
+/* Hook for plugins to get control in add_paths_to_joinrel() */
+set_join_pathlist_hook_type set_join_pathlist_hook = NULL;
+
+/*
+ * Paths parameterized by the parent can be considered to be parameterized by
+ * any of its children.
+ */
+#define PATH_PARAM_BY_PARENT(path, rel) \
+ ((path)->param_info && bms_overlap(PATH_REQ_OUTER(path), \
+ (rel)->top_parent_relids))
+#define PATH_PARAM_BY_REL_SELF(path, rel) \
+ ((path)->param_info && bms_overlap(PATH_REQ_OUTER(path), (rel)->relids))
+
+#define PATH_PARAM_BY_REL(path, rel) \
+ (PATH_PARAM_BY_REL_SELF(path, rel) || PATH_PARAM_BY_PARENT(path, rel))
+
+static void try_partial_mergejoin_path(PlannerInfo *root,
+ RelOptInfo *joinrel,
+ Path *outer_path,
+ Path *inner_path,
+ List *pathkeys,
+ List *mergeclauses,
+ List *outersortkeys,
+ List *innersortkeys,
+ JoinType jointype,
+ JoinPathExtraData *extra);
+static void sort_inner_and_outer(PlannerInfo *root, RelOptInfo *joinrel,
+ RelOptInfo *outerrel, RelOptInfo *innerrel,
+ JoinType jointype, JoinPathExtraData *extra);
+static inline bool clause_sides_match_join(RestrictInfo *rinfo,
+ RelOptInfo *outerrel,
+ RelOptInfo *innerrel);
+static void match_unsorted_outer(PlannerInfo *root, RelOptInfo *joinrel,
+ RelOptInfo *outerrel, RelOptInfo *innerrel,
+ JoinType jointype, JoinPathExtraData *extra);
+static void consider_parallel_nestloop(PlannerInfo *root,
+ RelOptInfo *joinrel,
+ RelOptInfo *outerrel,
+ RelOptInfo *innerrel,
+ JoinType jointype,
+ JoinPathExtraData *extra);
+static void consider_parallel_mergejoin(PlannerInfo *root,
+ RelOptInfo *joinrel,
+ RelOptInfo *outerrel,
+ RelOptInfo *innerrel,
+ JoinType jointype,
+ JoinPathExtraData *extra,
+ Path *inner_cheapest_total);
+static void hash_inner_and_outer(PlannerInfo *root, RelOptInfo *joinrel,
+ RelOptInfo *outerrel, RelOptInfo *innerrel,
+ JoinType jointype, JoinPathExtraData *extra);
+static List *select_mergejoin_clauses(PlannerInfo *root,
+ RelOptInfo *joinrel,
+ RelOptInfo *outerrel,
+ RelOptInfo *innerrel,
+ List *restrictlist,
+ JoinType jointype,
+ bool *mergejoin_allowed);
+static void generate_mergejoin_paths(PlannerInfo *root,
+ RelOptInfo *joinrel,
+ RelOptInfo *innerrel,
+ Path *outerpath,
+ JoinType jointype,
+ JoinPathExtraData *extra,
+ bool useallclauses,
+ Path *inner_cheapest_total,
+ List *merge_pathkeys,
+ bool is_partial);
+
+
+/*
+ * add_paths_to_joinrel
+ * Given a join relation and two component rels from which it can be made,
+ * consider all possible paths that use the two component rels as outer
+ * and inner rel respectively. Add these paths to the join rel's pathlist
+ * if they survive comparison with other paths (and remove any existing
+ * paths that are dominated by these paths).
+ *
+ * Modifies the pathlist field of the joinrel node to contain the best
+ * paths found so far.
+ *
+ * jointype is not necessarily the same as sjinfo->jointype; it might be
+ * "flipped around" if we are considering joining the rels in the opposite
+ * direction from what's indicated in sjinfo.
+ *
+ * Also, this routine and others in this module accept the special JoinTypes
+ * JOIN_UNIQUE_OUTER and JOIN_UNIQUE_INNER to indicate that we should
+ * unique-ify the outer or inner relation and then apply a regular inner
+ * join. These values are not allowed to propagate outside this module,
+ * however. Path cost estimation code may need to recognize that it's
+ * dealing with such a case --- the combination of nominal jointype INNER
+ * with sjinfo->jointype == JOIN_SEMI indicates that.
+ */
+void
+add_paths_to_joinrel(PlannerInfo *root,
+ RelOptInfo *joinrel,
+ RelOptInfo *outerrel,
+ RelOptInfo *innerrel,
+ JoinType jointype,
+ SpecialJoinInfo *sjinfo,
+ List *restrictlist)
+{
+ JoinPathExtraData extra;
+ bool mergejoin_allowed = true;
+ bool consider_join_pushdown = false;
+ ListCell *lc;
+ Relids joinrelids;
+
+ /*
+ * PlannerInfo doesn't contain the SpecialJoinInfos created for joins
+ * between child relations, even if there is a SpecialJoinInfo node for
+ * the join between the topmost parents. So, while calculating Relids set
+ * representing the restriction, consider relids of topmost parent of
+ * partitions.
+ */
+ if (joinrel->reloptkind == RELOPT_OTHER_JOINREL)
+ joinrelids = joinrel->top_parent_relids;
+ else
+ joinrelids = joinrel->relids;
+
+ extra.restrictlist = restrictlist;
+ extra.mergeclause_list = NIL;
+ extra.sjinfo = sjinfo;
+ extra.param_source_rels = NULL;
+
+ /*
+ * See if the inner relation is provably unique for this outer rel.
+ *
+ * We have some special cases: for JOIN_SEMI and JOIN_ANTI, it doesn't
+ * matter since the executor can make the equivalent optimization anyway;
+ * we need not expend planner cycles on proofs. For JOIN_UNIQUE_INNER, we
+ * must be considering a semijoin whose inner side is not provably unique
+ * (else reduce_unique_semijoins would've simplified it), so there's no
+ * point in calling innerrel_is_unique. However, if the LHS covers all of
+ * the semijoin's min_lefthand, then it's appropriate to set inner_unique
+ * because the path produced by create_unique_path will be unique relative
+ * to the LHS. (If we have an LHS that's only part of the min_lefthand,
+ * that is *not* true.) For JOIN_UNIQUE_OUTER, pass JOIN_INNER to avoid
+ * letting that value escape this module.
+ */
+ switch (jointype)
+ {
+ case JOIN_SEMI:
+ case JOIN_ANTI:
+
+ /*
+ * XXX it may be worth proving this to allow a Memoize to be
+ * considered for Nested Loop Semi/Anti Joins.
+ */
+ extra.inner_unique = false; /* well, unproven */
+ break;
+ case JOIN_UNIQUE_INNER:
+ extra.inner_unique = bms_is_subset(sjinfo->min_lefthand,
+ outerrel->relids);
+ break;
+ case JOIN_UNIQUE_OUTER:
+ extra.inner_unique = innerrel_is_unique(root,
+ joinrel->relids,
+ outerrel->relids,
+ innerrel,
+ JOIN_INNER,
+ restrictlist,
+ false);
+ break;
+ default:
+ extra.inner_unique = innerrel_is_unique(root,
+ joinrel->relids,
+ outerrel->relids,
+ innerrel,
+ jointype,
+ restrictlist,
+ false);
+ break;
+ }
+
+ /*
+ * Find potential mergejoin clauses. We can skip this if we are not
+ * interested in doing a mergejoin. However, mergejoin may be our only
+ * way of implementing a full outer join, so override enable_mergejoin if
+ * it's a full join.
+ */
+ if (enable_mergejoin || jointype == JOIN_FULL)
+ extra.mergeclause_list = select_mergejoin_clauses(root,
+ joinrel,
+ outerrel,
+ innerrel,
+ restrictlist,
+ jointype,
+ &mergejoin_allowed);
+
+ /*
+ * If it's SEMI, ANTI, or inner_unique join, compute correction factors
+ * for cost estimation. These will be the same for all paths.
+ */
+ if (jointype == JOIN_SEMI || jointype == JOIN_ANTI || extra.inner_unique)
+ compute_semi_anti_join_factors(root, joinrel, outerrel, innerrel,
+ jointype, sjinfo, restrictlist,
+ &extra.semifactors);
+
+ /*
+ * Decide whether it's sensible to generate parameterized paths for this
+ * joinrel, and if so, which relations such paths should require. There
+ * is usually no need to create a parameterized result path unless there
+ * is a join order restriction that prevents joining one of our input rels
+ * directly to the parameter source rel instead of joining to the other
+ * input rel. (But see allow_star_schema_join().) This restriction
+ * reduces the number of parameterized paths we have to deal with at
+ * higher join levels, without compromising the quality of the resulting
+ * plan. We express the restriction as a Relids set that must overlap the
+ * parameterization of any proposed join path.
+ */
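+	/*
+	 * For example, given "A LEFT JOIN (B JOIN C) ON ..." where C has an
+	 * indexable join clause referencing A, the join order restriction keeps
+	 * us from joining C directly to A; paths for the {B, C} join can
+	 * therefore usefully stay parameterized by A, and A will end up in
+	 * param_source_rels below.
+	 */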
+ foreach(lc, root->join_info_list)
+ {
+ SpecialJoinInfo *sjinfo2 = (SpecialJoinInfo *) lfirst(lc);
+
+ /*
+ * SJ is relevant to this join if we have some part of its RHS
+ * (possibly not all of it), and haven't yet joined to its LHS. (This
+ * test is pretty simplistic, but should be sufficient considering the
+ * join has already been proven legal.) If the SJ is relevant, it
+ * presents constraints for joining to anything not in its RHS.
+ */
+ if (bms_overlap(joinrelids, sjinfo2->min_righthand) &&
+ !bms_overlap(joinrelids, sjinfo2->min_lefthand))
+ extra.param_source_rels = bms_join(extra.param_source_rels,
+ bms_difference(root->all_baserels,
+ sjinfo2->min_righthand));
+
+ /* full joins constrain both sides symmetrically */
+ if (sjinfo2->jointype == JOIN_FULL &&
+ bms_overlap(joinrelids, sjinfo2->min_lefthand) &&
+ !bms_overlap(joinrelids, sjinfo2->min_righthand))
+ extra.param_source_rels = bms_join(extra.param_source_rels,
+ bms_difference(root->all_baserels,
+ sjinfo2->min_lefthand));
+ }
+
+ /*
+ * However, when a LATERAL subquery is involved, there will simply not be
+ * any paths for the joinrel that aren't parameterized by whatever the
+ * subquery is parameterized by, unless its parameterization is resolved
+ * within the joinrel. So we might as well allow additional dependencies
+ * on whatever residual lateral dependencies the joinrel will have.
+ */
+ extra.param_source_rels = bms_add_members(extra.param_source_rels,
+ joinrel->lateral_relids);
+
+ /*
+ * 1. Consider mergejoin paths where both relations must be explicitly
+ * sorted. Skip this if we can't mergejoin.
+ */
+ if (mergejoin_allowed)
+ sort_inner_and_outer(root, joinrel, outerrel, innerrel,
+ jointype, &extra);
+
+ /*
+ * 2. Consider paths where the outer relation need not be explicitly
+ * sorted. This includes both nestloops and mergejoins where the outer
+ * path is already ordered. Again, skip this if we can't mergejoin.
+ * (That's okay because we know that nestloop can't handle right/full
+ * joins at all, so it wouldn't work in the prohibited cases either.)
+ */
+ if (mergejoin_allowed)
+ match_unsorted_outer(root, joinrel, outerrel, innerrel,
+ jointype, &extra);
+
+#ifdef NOT_USED
+
+ /*
+ * 3. Consider paths where the inner relation need not be explicitly
+ * sorted. This includes mergejoins only (nestloops were already built in
+ * match_unsorted_outer).
+ *
+ * Diked out as redundant 2/13/2000 -- tgl. There isn't any really
+ * significant difference between the inner and outer side of a mergejoin,
+ * so match_unsorted_inner creates no paths that aren't equivalent to
+ * those made by match_unsorted_outer when add_paths_to_joinrel() is
+ * invoked with the two rels given in the other order.
+ */
+ if (mergejoin_allowed)
+ match_unsorted_inner(root, joinrel, outerrel, innerrel,
+ jointype, &extra);
+#endif
+
+ /*
+ * 4. Consider paths where both outer and inner relations must be hashed
+ * before being joined. As above, disregard enable_hashjoin for full
+ * joins, because there may be no other alternative.
+ */
+ if (enable_hashjoin || jointype == JOIN_FULL)
+ hash_inner_and_outer(root, joinrel, outerrel, innerrel,
+ jointype, &extra);
+
+ /*
+ * createplan.c does not currently support handling of pseudoconstant
+	 * clauses assigned to joins pushed down by extensions; check whether the
+	 * restrictlist has any such clauses, and if not, allow extensions (and
+	 * FDWs) to consider pushing the join down.
+ */
+ if ((joinrel->fdwroutine &&
+ joinrel->fdwroutine->GetForeignJoinPaths) ||
+ set_join_pathlist_hook)
+ consider_join_pushdown = !has_pseudoconstant_clauses(root,
+ restrictlist);
+
+ /*
+ * 5. If inner and outer relations are foreign tables (or joins) belonging
+ * to the same server and assigned to the same user to check access
+ * permissions as, give the FDW a chance to push down joins.
+ */
+ if (joinrel->fdwroutine &&
+ joinrel->fdwroutine->GetForeignJoinPaths &&
+ consider_join_pushdown)
+ joinrel->fdwroutine->GetForeignJoinPaths(root, joinrel,
+ outerrel, innerrel,
+ jointype, &extra);
+
+ /*
+ * 6. Finally, give extensions a chance to manipulate the path list. They
+ * could add new paths (such as CustomPaths) by calling add_path(), or
+ * add_partial_path() if parallel aware. They could also delete or modify
+ * paths added by the core code.
+ */
+ if (set_join_pathlist_hook &&
+ consider_join_pushdown)
+ set_join_pathlist_hook(root, joinrel, outerrel, innerrel,
+ jointype, &extra);
+}
+
+/*
+ * We override the param_source_rels heuristic to accept nestloop paths in
+ * which the outer rel satisfies some but not all of the inner path's
+ * parameterization. This is necessary to get good plans for star-schema
+ * scenarios, in which a parameterized path for a large table may require
+ * parameters from multiple small tables that will not get joined directly to
+ * each other. We can handle that by stacking nestloops that have the small
+ * tables on the outside; but this breaks the rule the param_source_rels
+ * heuristic is based on, namely that parameters should not be passed down
+ * across joins unless there's a join-order-constraint-based reason to do so.
+ * So we ignore the param_source_rels restriction when this case applies.
+ *
+ * allow_star_schema_join() returns true if the param_source_rels restriction
+ * should be overridden, ie, it's okay to perform this join.
+ */
+static inline bool
+allow_star_schema_join(PlannerInfo *root,
+ Relids outerrelids,
+ Relids inner_paramrels)
+{
+ /*
+ * It's a star-schema case if the outer rel provides some but not all of
+ * the inner rel's parameterization.
+ */
+ return (bms_overlap(inner_paramrels, outerrelids) &&
+ bms_nonempty_difference(inner_paramrels, outerrelids));
+}
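+
+/*
+ * As an illustration, consider a star-schema query such as
+ *		SELECT ... FROM fact f, dim1 d1, dim2 d2
+ *		WHERE f.d1_id = d1.id AND f.d2_id = d2.id AND d1.x = 1 AND d2.y = 2
+ * The interesting plan shape stacks nestloops with the dimension tables on
+ * the outside and a doubly-parameterized scan of "fact" at the bottom:
+ *		NestLoop
+ *		  -> Scan on d1
+ *		  -> NestLoop
+ *			   -> Scan on d2
+ *			   -> Index Scan on f (parameterized by d1.id and d2.id)
+ * Here the lower join's outer rel (d2) supplies only part of the fact
+ * path's parameterization; the rest comes from d1 at the upper join level,
+ * which is exactly the case allow_star_schema_join() accepts.
+ */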
+
+/*
+ * paraminfo_get_equal_hashops
+ * Determine if param_info and innerrel's lateral_vars can be hashed.
+ *		Returns true if hashing is possible, otherwise returns false.
+ *
+ * Additionally, we collect the outer expressions and the hash operators for
+ * each parameter to innerrel.  These are set in 'param_exprs', 'operators'
+ * and 'binary_mode' when we return true.
+ */
+static bool
+paraminfo_get_equal_hashops(PlannerInfo *root, ParamPathInfo *param_info,
+ RelOptInfo *outerrel, RelOptInfo *innerrel,
+ List **param_exprs, List **operators,
+ bool *binary_mode)
+{
+ ListCell *lc;
+
+ *param_exprs = NIL;
+ *operators = NIL;
+ *binary_mode = false;
+
+ if (param_info != NULL)
+ {
+ List *clauses = param_info->ppi_clauses;
+
+ foreach(lc, clauses)
+ {
+ RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
+ OpExpr *opexpr;
+ Node *expr;
+ Oid hasheqoperator;
+
+ opexpr = (OpExpr *) rinfo->clause;
+
+ /*
+ * Bail if the rinfo is not compatible. We need a join OpExpr
+ * with 2 args.
+ */
+ if (!IsA(opexpr, OpExpr) || list_length(opexpr->args) != 2 ||
+ !clause_sides_match_join(rinfo, outerrel, innerrel))
+ {
+ list_free(*operators);
+ list_free(*param_exprs);
+ return false;
+ }
+
+ if (rinfo->outer_is_left)
+ {
+ expr = (Node *) linitial(opexpr->args);
+ hasheqoperator = rinfo->left_hasheqoperator;
+ }
+ else
+ {
+ expr = (Node *) lsecond(opexpr->args);
+ hasheqoperator = rinfo->right_hasheqoperator;
+ }
+
+ /* can't do memoize if we can't hash the outer type */
+ if (!OidIsValid(hasheqoperator))
+ {
+ list_free(*operators);
+ list_free(*param_exprs);
+ return false;
+ }
+
+ *operators = lappend_oid(*operators, hasheqoperator);
+ *param_exprs = lappend(*param_exprs, expr);
+
+ /*
+			 * When the join operator is not hashable, it may be able to
+			 * distinguish values that the hash equality operator classes as
+			 * equal.  For example, with floating point types -0.0 and +0.0
+			 * are classed as equal by the hash function and equality
+			 * function, but some other operator may be able to tell those
+			 * values apart.  This means we must put memoize into binary
+			 * comparison mode so that it does bit-by-bit comparisons rather
+			 * than the "logical" comparison it would do using the hash
+			 * equality operator.
+ */
+ if (!OidIsValid(rinfo->hashjoinoperator))
+ *binary_mode = true;
+ }
+ }
+
+ /* Now add any lateral vars to the cache key too */
+ foreach(lc, innerrel->lateral_vars)
+ {
+ Node *expr = (Node *) lfirst(lc);
+ TypeCacheEntry *typentry;
+
+ /* Reject if there are any volatile functions */
+ if (contain_volatile_functions(expr))
+ {
+ list_free(*operators);
+ list_free(*param_exprs);
+ return false;
+ }
+
+ typentry = lookup_type_cache(exprType(expr),
+ TYPECACHE_HASH_PROC | TYPECACHE_EQ_OPR);
+
+ /* can't use a memoize node without a valid hash equals operator */
+ if (!OidIsValid(typentry->hash_proc) || !OidIsValid(typentry->eq_opr))
+ {
+ list_free(*operators);
+ list_free(*param_exprs);
+ return false;
+ }
+
+ *operators = lappend_oid(*operators, typentry->eq_opr);
+ *param_exprs = lappend(*param_exprs, expr);
+
+ /*
+		 * We must go into binary mode since we have little idea of how these
+		 * lateral Vars are being used.  See the comment above where we set
+		 * *binary_mode for the non-lateral Var case.  This could be relaxed
+		 * a bit if we had the RestrictInfos and knew the operators being
+		 * used; however, for cases like Vars that are arguments to functions
+		 * we must operate in binary mode, as we have no visibility into what
+		 * the function does with the Vars.
+ */
+ *binary_mode = true;
+ }
+
+ /* We're okay to use memoize */
+ return true;
+}
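+
+/*
+ * For example, if the inner path is parameterized by the single join clause
+ * "inner.x = outer.y", then on success *param_exprs will contain the
+ * outer-side expression (outer.y) and *operators its hash equality
+ * operator, letting Memoize use outer.y values as cache keys for repeated
+ * inner scans.
+ */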
+
+/*
+ * get_memoize_path
+ * If possible, make and return a Memoize path atop of 'inner_path'.
+ * Otherwise return NULL.
+ */
+static Path *
+get_memoize_path(PlannerInfo *root, RelOptInfo *innerrel,
+ RelOptInfo *outerrel, Path *inner_path,
+ Path *outer_path, JoinType jointype,
+ JoinPathExtraData *extra)
+{
+ RelOptInfo *top_outerrel;
+ List *param_exprs;
+ List *hash_operators;
+ ListCell *lc;
+ bool binary_mode;
+
+ /* Obviously not if it's disabled */
+ if (!enable_memoize)
+ return NULL;
+
+ /*
+ * We can safely not bother with all this unless we expect to perform more
+ * than one inner scan. The first scan is always going to be a cache
+ * miss. This would likely fail later anyway based on costs, so this is
+ * really just to save some wasted effort.
+ */
+ if (outer_path->parent->rows < 2)
+ return NULL;
+
+ /*
+ * We can only have a memoize node when there's some kind of cache key,
+	 * either parameterized path clauses or lateral Vars.  With no cache key,
+	 * a Materialize node would likely be more useful.
+ */
+ if ((inner_path->param_info == NULL ||
+ inner_path->param_info->ppi_clauses == NIL) &&
+ innerrel->lateral_vars == NIL)
+ return NULL;
+
+ /*
+ * Currently we don't do this for SEMI and ANTI joins unless they're
+ * marked as inner_unique. This is because nested loop SEMI/ANTI joins
+ * don't scan the inner node to completion, which will mean memoize cannot
+ * mark the cache entry as complete.
+ *
+ * XXX Currently we don't attempt to mark SEMI/ANTI joins as inner_unique
+ * = true. Should we? See add_paths_to_joinrel()
+ */
+ if (!extra->inner_unique && (jointype == JOIN_SEMI ||
+ jointype == JOIN_ANTI))
+ return NULL;
+
+ /*
+ * Memoize normally marks cache entries as complete when it runs out of
+ * tuples to read from its subplan. However, with unique joins, Nested
+ * Loop will skip to the next outer tuple after finding the first matching
+ * inner tuple. This means that we may not read the inner side of the
+ * join to completion which leaves no opportunity to mark the cache entry
+ * as complete. To work around that, when the join is unique we
+ * automatically mark cache entries as complete after fetching the first
+ * tuple. This works when the entire join condition is parameterized.
+ * Otherwise, when the parameterization is only a subset of the join
+ * condition, we can't be sure which part of it causes the join to be
+ * unique. This means there are no guarantees that only 1 tuple will be
+ * read. We cannot mark the cache entry as complete after reading the
+	 * first tuple without that guarantee.  This limits the Memoize node's
+	 * usefulness to outer rows that have no join partner, since that is the
+	 * only case where Nested Loop would exhaust the inner scan of a unique
+	 * join.  With its scope that limited, we just don't bother making a
+	 * memoize path in this case.
+ *
+ * Lateral vars needn't be considered here as they're not considered when
+ * determining if the join is unique.
+ *
+ * XXX this could be enabled if the remaining join quals were made part of
+ * the inner scan's filter instead of the join filter. Maybe it's worth
+ * considering doing that?
+ */
+ if (extra->inner_unique &&
+ (inner_path->param_info == NULL ||
+ list_length(inner_path->param_info->ppi_clauses) <
+ list_length(extra->restrictlist)))
+ return NULL;
+
+ /*
+ * We can't use a memoize node if there are volatile functions in the
+ * inner rel's target list or restrict list. A cache hit could reduce the
+ * number of calls to these functions.
+ */
+ if (contain_volatile_functions((Node *) innerrel->reltarget))
+ return NULL;
+
+ foreach(lc, innerrel->baserestrictinfo)
+ {
+ RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
+
+ if (contain_volatile_functions((Node *) rinfo))
+ return NULL;
+ }
+
+ /*
+ * Also check the parameterized path restrictinfos for volatile functions.
+ * Indexed functions must be immutable so shouldn't have any volatile
+ * functions, however, with a lateral join the inner scan may not be an
+ * index scan.
+ */
+ if (inner_path->param_info != NULL)
+ {
+ foreach(lc, inner_path->param_info->ppi_clauses)
+ {
+ RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
+
+ if (contain_volatile_functions((Node *) rinfo))
+ return NULL;
+ }
+ }
+
+ /*
+ * When considering a partitionwise join, we have clauses that reference
+ * the outerrel's top parent not outerrel itself.
+ */
+ if (outerrel->reloptkind == RELOPT_OTHER_MEMBER_REL)
+ top_outerrel = find_base_rel(root, bms_singleton_member(outerrel->top_parent_relids));
+ else if (outerrel->reloptkind == RELOPT_OTHER_JOINREL)
+ top_outerrel = find_join_rel(root, outerrel->top_parent_relids);
+ else
+ top_outerrel = outerrel;
+
+ /* Check if we have hash ops for each parameter to the path */
+ if (paraminfo_get_equal_hashops(root,
+ inner_path->param_info,
+ top_outerrel,
+ innerrel,
+ &param_exprs,
+ &hash_operators,
+ &binary_mode))
+ {
+ return (Path *) create_memoize_path(root,
+ innerrel,
+ inner_path,
+ param_exprs,
+ hash_operators,
+ extra->inner_unique,
+ binary_mode,
+ outer_path->rows);
+ }
+
+ return NULL;
+}
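+
+/*
+ * For example, in a nestloop whose inner side is an indexscan parameterized
+ * by outer.x, a Memoize node placed atop the indexscan caches the inner
+ * rows for each distinct value of outer.x, so repeated outer rows with the
+ * same x value can be answered from the cache instead of rescanning the
+ * inner relation.  (Whether that actually wins is decided by ordinary path
+ * cost comparison once the surrounding nestloop path is built.)
+ */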
+
+/*
+ * try_nestloop_path
+ * Consider a nestloop join path; if it appears useful, push it into
+ * the joinrel's pathlist via add_path().
+ */
+static void
+try_nestloop_path(PlannerInfo *root,
+ RelOptInfo *joinrel,
+ Path *outer_path,
+ Path *inner_path,
+ List *pathkeys,
+ JoinType jointype,
+ JoinPathExtraData *extra)
+{
+ Relids required_outer;
+ JoinCostWorkspace workspace;
+ RelOptInfo *innerrel = inner_path->parent;
+ RelOptInfo *outerrel = outer_path->parent;
+ Relids innerrelids;
+ Relids outerrelids;
+ Relids inner_paramrels = PATH_REQ_OUTER(inner_path);
+ Relids outer_paramrels = PATH_REQ_OUTER(outer_path);
+
+ /*
+ * Paths are parameterized by top-level parents, so run parameterization
+ * tests on the parent relids.
+ */
+ if (innerrel->top_parent_relids)
+ innerrelids = innerrel->top_parent_relids;
+ else
+ innerrelids = innerrel->relids;
+
+ if (outerrel->top_parent_relids)
+ outerrelids = outerrel->top_parent_relids;
+ else
+ outerrelids = outerrel->relids;
+
+ /*
+ * Check to see if proposed path is still parameterized, and reject if the
+ * parameterization wouldn't be sensible --- unless allow_star_schema_join
+ * says to allow it anyway. Also, we must reject if have_dangerous_phv
+ * doesn't like the look of it, which could only happen if the nestloop is
+ * still parameterized.
+ */
+ required_outer = calc_nestloop_required_outer(outerrelids, outer_paramrels,
+ innerrelids, inner_paramrels);
+ if (required_outer &&
+ ((!bms_overlap(required_outer, extra->param_source_rels) &&
+ !allow_star_schema_join(root, outerrelids, inner_paramrels)) ||
+ have_dangerous_phv(root, outerrelids, inner_paramrels)))
+ {
+ /* Waste no memory when we reject a path here */
+ bms_free(required_outer);
+ return;
+ }
+
+ /*
+ * Do a precheck to quickly eliminate obviously-inferior paths. We
+ * calculate a cheap lower bound on the path's cost and then use
+ * add_path_precheck() to see if the path is clearly going to be dominated
+ * by some existing path for the joinrel. If not, do the full pushup with
+ * creating a fully valid path structure and submitting it to add_path().
+ * The latter two steps are expensive enough to make this two-phase
+ * methodology worthwhile.
+ */
+ initial_cost_nestloop(root, &workspace, jointype,
+ outer_path, inner_path, extra);
+
+ if (add_path_precheck(joinrel,
+ workspace.startup_cost, workspace.total_cost,
+ pathkeys, required_outer))
+ {
+ /*
+ * If the inner path is parameterized, it is parameterized by the
+ * topmost parent of the outer rel, not the outer rel itself. Fix
+ * that.
+ */
+ if (PATH_PARAM_BY_PARENT(inner_path, outer_path->parent))
+ {
+ inner_path = reparameterize_path_by_child(root, inner_path,
+ outer_path->parent);
+
+ /*
+ * If we could not translate the path, we can't create nest loop
+ * path.
+ */
+ if (!inner_path)
+ {
+ bms_free(required_outer);
+ return;
+ }
+ }
+
+ add_path(joinrel, (Path *)
+ create_nestloop_path(root,
+ joinrel,
+ jointype,
+ &workspace,
+ extra,
+ outer_path,
+ inner_path,
+ extra->restrictlist,
+ pathkeys,
+ required_outer));
+ }
+ else
+ {
+ /* Waste no memory when we reject a path here */
+ bms_free(required_outer);
+ }
+}
+
+/*
+ * try_partial_nestloop_path
+ * Consider a partial nestloop join path; if it appears useful, push it into
+ * the joinrel's partial_pathlist via add_partial_path().
+ */
+static void
+try_partial_nestloop_path(PlannerInfo *root,
+ RelOptInfo *joinrel,
+ Path *outer_path,
+ Path *inner_path,
+ List *pathkeys,
+ JoinType jointype,
+ JoinPathExtraData *extra)
+{
+ JoinCostWorkspace workspace;
+
+ /*
+ * If the inner path is parameterized, the parameterization must be fully
+ * satisfied by the proposed outer path. Parameterized partial paths are
+ * not supported. The caller should already have verified that no lateral
+ * rels are required here.
+ */
+ Assert(bms_is_empty(joinrel->lateral_relids));
+ if (inner_path->param_info != NULL)
+ {
+ Relids inner_paramrels = inner_path->param_info->ppi_req_outer;
+ RelOptInfo *outerrel = outer_path->parent;
+ Relids outerrelids;
+
+ /*
+ * The inner and outer paths are parameterized, if at all, by the top
+ * level parents, not the child relations, so we must use those relids
+ * for our parameterization tests.
+ */
+ if (outerrel->top_parent_relids)
+ outerrelids = outerrel->top_parent_relids;
+ else
+ outerrelids = outerrel->relids;
+
+ if (!bms_is_subset(inner_paramrels, outerrelids))
+ return;
+ }
+
+ /*
+ * Before creating a path, get a quick lower bound on what it is likely to
+ * cost. Bail out right away if it looks terrible.
+ */
+ initial_cost_nestloop(root, &workspace, jointype,
+ outer_path, inner_path, extra);
+ if (!add_partial_path_precheck(joinrel, workspace.total_cost, pathkeys))
+ return;
+
+ /*
+ * If the inner path is parameterized, it is parameterized by the topmost
+ * parent of the outer rel, not the outer rel itself. Fix that.
+ */
+ if (PATH_PARAM_BY_PARENT(inner_path, outer_path->parent))
+ {
+ inner_path = reparameterize_path_by_child(root, inner_path,
+ outer_path->parent);
+
+ /*
+ * If we could not translate the path, we can't create nest loop path.
+ */
+ if (!inner_path)
+ return;
+ }
+
+ /* Might be good enough to be worth trying, so let's try it. */
+ add_partial_path(joinrel, (Path *)
+ create_nestloop_path(root,
+ joinrel,
+ jointype,
+ &workspace,
+ extra,
+ outer_path,
+ inner_path,
+ extra->restrictlist,
+ pathkeys,
+ NULL));
+}
+
+/*
+ * try_mergejoin_path
+ * Consider a merge join path; if it appears useful, push it into
+ * the joinrel's pathlist via add_path().
+ */
+static void
+try_mergejoin_path(PlannerInfo *root,
+ RelOptInfo *joinrel,
+ Path *outer_path,
+ Path *inner_path,
+ List *pathkeys,
+ List *mergeclauses,
+ List *outersortkeys,
+ List *innersortkeys,
+ JoinType jointype,
+ JoinPathExtraData *extra,
+ bool is_partial)
+{
+ Relids required_outer;
+ JoinCostWorkspace workspace;
+
+ if (is_partial)
+ {
+ try_partial_mergejoin_path(root,
+ joinrel,
+ outer_path,
+ inner_path,
+ pathkeys,
+ mergeclauses,
+ outersortkeys,
+ innersortkeys,
+ jointype,
+ extra);
+ return;
+ }
+
+ /*
+ * Check to see if proposed path is still parameterized, and reject if the
+ * parameterization wouldn't be sensible.
+ */
+ required_outer = calc_non_nestloop_required_outer(outer_path,
+ inner_path);
+ if (required_outer &&
+ !bms_overlap(required_outer, extra->param_source_rels))
+ {
+ /* Waste no memory when we reject a path here */
+ bms_free(required_outer);
+ return;
+ }
+
+ /*
+ * If the given paths are already well enough ordered, we can skip doing
+ * an explicit sort.
+ */
+ if (outersortkeys &&
+ pathkeys_contained_in(outersortkeys, outer_path->pathkeys))
+ outersortkeys = NIL;
+ if (innersortkeys &&
+ pathkeys_contained_in(innersortkeys, inner_path->pathkeys))
+ innersortkeys = NIL;
+
+ /*
+ * See comments in try_nestloop_path().
+ */
+ initial_cost_mergejoin(root, &workspace, jointype, mergeclauses,
+ outer_path, inner_path,
+ outersortkeys, innersortkeys,
+ extra);
+
+ if (add_path_precheck(joinrel,
+ workspace.startup_cost, workspace.total_cost,
+ pathkeys, required_outer))
+ {
+ add_path(joinrel, (Path *)
+ create_mergejoin_path(root,
+ joinrel,
+ jointype,
+ &workspace,
+ extra,
+ outer_path,
+ inner_path,
+ extra->restrictlist,
+ pathkeys,
+ required_outer,
+ mergeclauses,
+ outersortkeys,
+ innersortkeys));
+ }
+ else
+ {
+ /* Waste no memory when we reject a path here */
+ bms_free(required_outer);
+ }
+}
+
+/*
+ * try_partial_mergejoin_path
+ * Consider a partial merge join path; if it appears useful, push it into
+ * the joinrel's pathlist via add_partial_path().
+ */
+static void
+try_partial_mergejoin_path(PlannerInfo *root,
+ RelOptInfo *joinrel,
+ Path *outer_path,
+ Path *inner_path,
+ List *pathkeys,
+ List *mergeclauses,
+ List *outersortkeys,
+ List *innersortkeys,
+ JoinType jointype,
+ JoinPathExtraData *extra)
+{
+ JoinCostWorkspace workspace;
+
+ /*
+ * See comments in try_partial_hashjoin_path().
+ */
+ Assert(bms_is_empty(joinrel->lateral_relids));
+ if (inner_path->param_info != NULL)
+ {
+ Relids inner_paramrels = inner_path->param_info->ppi_req_outer;
+
+ if (!bms_is_empty(inner_paramrels))
+ return;
+ }
+
+ /*
+ * If the given paths are already well enough ordered, we can skip doing
+ * an explicit sort.
+ */
+ if (outersortkeys &&
+ pathkeys_contained_in(outersortkeys, outer_path->pathkeys))
+ outersortkeys = NIL;
+ if (innersortkeys &&
+ pathkeys_contained_in(innersortkeys, inner_path->pathkeys))
+ innersortkeys = NIL;
+
+ /*
+ * See comments in try_partial_nestloop_path().
+ */
+ initial_cost_mergejoin(root, &workspace, jointype, mergeclauses,
+ outer_path, inner_path,
+ outersortkeys, innersortkeys,
+ extra);
+
+ if (!add_partial_path_precheck(joinrel, workspace.total_cost, pathkeys))
+ return;
+
+ /* Might be good enough to be worth trying, so let's try it. */
+ add_partial_path(joinrel, (Path *)
+ create_mergejoin_path(root,
+ joinrel,
+ jointype,
+ &workspace,
+ extra,
+ outer_path,
+ inner_path,
+ extra->restrictlist,
+ pathkeys,
+ NULL,
+ mergeclauses,
+ outersortkeys,
+ innersortkeys));
+}
+
+/*
+ * try_hashjoin_path
+ * Consider a hash join path; if it appears useful, push it into
+ * the joinrel's pathlist via add_path().
+ */
+static void
+try_hashjoin_path(PlannerInfo *root,
+ RelOptInfo *joinrel,
+ Path *outer_path,
+ Path *inner_path,
+ List *hashclauses,
+ JoinType jointype,
+ JoinPathExtraData *extra)
+{
+ Relids required_outer;
+ JoinCostWorkspace workspace;
+
+ /*
+ * Check to see if proposed path is still parameterized, and reject if the
+ * parameterization wouldn't be sensible.
+ */
+ required_outer = calc_non_nestloop_required_outer(outer_path,
+ inner_path);
+ if (required_outer &&
+ !bms_overlap(required_outer, extra->param_source_rels))
+ {
+ /* Waste no memory when we reject a path here */
+ bms_free(required_outer);
+ return;
+ }
+
+ /*
+ * See comments in try_nestloop_path(). Also note that hashjoin paths
+ * never have any output pathkeys, per comments in create_hashjoin_path.
+ */
+ initial_cost_hashjoin(root, &workspace, jointype, hashclauses,
+ outer_path, inner_path, extra, false);
+
+ if (add_path_precheck(joinrel,
+ workspace.startup_cost, workspace.total_cost,
+ NIL, required_outer))
+ {
+ add_path(joinrel, (Path *)
+ create_hashjoin_path(root,
+ joinrel,
+ jointype,
+ &workspace,
+ extra,
+ outer_path,
+ inner_path,
+ false, /* parallel_hash */
+ extra->restrictlist,
+ required_outer,
+ hashclauses));
+ }
+ else
+ {
+ /* Waste no memory when we reject a path here */
+ bms_free(required_outer);
+ }
+}
+
+/*
+ * try_partial_hashjoin_path
+ *	  Consider a partial hash join path; if it appears useful, push it into
+ * the joinrel's partial_pathlist via add_partial_path().
+ * The outer side is partial. If parallel_hash is true, then the inner path
+ * must be partial and will be run in parallel to create one or more shared
+ * hash tables; otherwise the inner path must be complete and a copy of it
+ * is run in every process to create separate identical private hash tables.
+ */
+static void
+try_partial_hashjoin_path(PlannerInfo *root,
+ RelOptInfo *joinrel,
+ Path *outer_path,
+ Path *inner_path,
+ List *hashclauses,
+ JoinType jointype,
+ JoinPathExtraData *extra,
+ bool parallel_hash)
+{
+ JoinCostWorkspace workspace;
+
+ /*
+ * If the inner path is parameterized, the parameterization must be fully
+ * satisfied by the proposed outer path. Parameterized partial paths are
+ * not supported. The caller should already have verified that no lateral
+ * rels are required here.
+ */
+ Assert(bms_is_empty(joinrel->lateral_relids));
+ if (inner_path->param_info != NULL)
+ {
+ Relids inner_paramrels = inner_path->param_info->ppi_req_outer;
+
+ if (!bms_is_empty(inner_paramrels))
+ return;
+ }
+
+ /*
+ * Before creating a path, get a quick lower bound on what it is likely to
+ * cost. Bail out right away if it looks terrible.
+ */
+ initial_cost_hashjoin(root, &workspace, jointype, hashclauses,
+ outer_path, inner_path, extra, parallel_hash);
+ if (!add_partial_path_precheck(joinrel, workspace.total_cost, NIL))
+ return;
+
+ /* Might be good enough to be worth trying, so let's try it. */
+ add_partial_path(joinrel, (Path *)
+ create_hashjoin_path(root,
+ joinrel,
+ jointype,
+ &workspace,
+ extra,
+ outer_path,
+ inner_path,
+ parallel_hash,
+ extra->restrictlist,
+ NULL,
+ hashclauses));
+}
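+
+/*
+ * To illustrate the parallel_hash flag: under a Gather with several
+ * workers, parallel_hash = true corresponds to a Parallel Hash Join in
+ * which the workers cooperate to build one shared hash table from a
+ * partial inner path, while parallel_hash = false makes each worker build
+ * its own private copy of the hash table from a complete inner path.
+ */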
+
+/*
+ * clause_sides_match_join
+ * Determine whether a join clause is of the right form to use in this join.
+ *
+ * We already know that the clause is a binary opclause referencing only the
+ * rels in the current join. The point here is to check whether it has the
+ * form "outerrel_expr op innerrel_expr" or "innerrel_expr op outerrel_expr",
+ * rather than mixing outer and inner vars on either side. If it matches,
+ * we set the transient flag outer_is_left to identify which side is which.
+ */
+static inline bool
+clause_sides_match_join(RestrictInfo *rinfo, RelOptInfo *outerrel,
+ RelOptInfo *innerrel)
+{
+ if (bms_is_subset(rinfo->left_relids, outerrel->relids) &&
+ bms_is_subset(rinfo->right_relids, innerrel->relids))
+ {
+ /* lefthand side is outer */
+ rinfo->outer_is_left = true;
+ return true;
+ }
+ else if (bms_is_subset(rinfo->left_relids, innerrel->relids) &&
+ bms_is_subset(rinfo->right_relids, outerrel->relids))
+ {
+ /* righthand side is outer */
+ rinfo->outer_is_left = false;
+ return true;
+ }
+ return false; /* no good for these input relations */
+}
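+
+/*
+ * For example, with t1 as the outer rel and t2 as the inner rel, the clause
+ * "t1.x = t2.y" matches (setting outer_is_left = true), "t2.y = t1.x"
+ * matches with outer_is_left = false, and "t1.x + t2.y = t1.z" does not
+ * match because it mixes Vars of both rels on one side.
+ */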
+
+/*
+ * sort_inner_and_outer
+ * Create mergejoin join paths by explicitly sorting both the outer and
+ * inner join relations on each available merge ordering.
+ *
+ * 'joinrel' is the join relation
+ * 'outerrel' is the outer join relation
+ * 'innerrel' is the inner join relation
+ * 'jointype' is the type of join to do
+ * 'extra' contains additional input values
+ */
+static void
+sort_inner_and_outer(PlannerInfo *root,
+ RelOptInfo *joinrel,
+ RelOptInfo *outerrel,
+ RelOptInfo *innerrel,
+ JoinType jointype,
+ JoinPathExtraData *extra)
+{
+ JoinType save_jointype = jointype;
+ Path *outer_path;
+ Path *inner_path;
+ Path *cheapest_partial_outer = NULL;
+ Path *cheapest_safe_inner = NULL;
+ List *all_pathkeys;
+ ListCell *l;
+
+ /*
+ * We only consider the cheapest-total-cost input paths, since we are
+ * assuming here that a sort is required. We will consider
+ * cheapest-startup-cost input paths later, and only if they don't need a
+ * sort.
+ *
+ * This function intentionally does not consider parameterized input
+ * paths, except when the cheapest-total is parameterized. If we did so,
+ * we'd have a combinatorial explosion of mergejoin paths of dubious
+ * value. This interacts with decisions elsewhere that also discriminate
+ * against mergejoins with parameterized inputs; see comments in
+ * src/backend/optimizer/README.
+ */
+ outer_path = outerrel->cheapest_total_path;
+ inner_path = innerrel->cheapest_total_path;
+
+ /*
+ * If either cheapest-total path is parameterized by the other rel, we
+ * can't use a mergejoin. (There's no use looking for alternative input
+ * paths, since these should already be the least-parameterized available
+ * paths.)
+ */
+ if (PATH_PARAM_BY_REL(outer_path, innerrel) ||
+ PATH_PARAM_BY_REL(inner_path, outerrel))
+ return;
+
+ /*
+ * If unique-ification is requested, do it and then handle as a plain
+ * inner join.
+ */
+ if (jointype == JOIN_UNIQUE_OUTER)
+ {
+ outer_path = (Path *) create_unique_path(root, outerrel,
+ outer_path, extra->sjinfo);
+ Assert(outer_path);
+ jointype = JOIN_INNER;
+ }
+ else if (jointype == JOIN_UNIQUE_INNER)
+ {
+ inner_path = (Path *) create_unique_path(root, innerrel,
+ inner_path, extra->sjinfo);
+ Assert(inner_path);
+ jointype = JOIN_INNER;
+ }
+
+ /*
+ * If the joinrel is parallel-safe, we may be able to consider a partial
+ * merge join. However, we can't handle JOIN_UNIQUE_OUTER, because the
+ * outer path will be partial, and therefore we won't be able to properly
+ * guarantee uniqueness. Similarly, we can't handle JOIN_FULL and
+ * JOIN_RIGHT, because they can produce false null extended rows. Also,
+ * the resulting path must not be parameterized.
+ */
+ if (joinrel->consider_parallel &&
+ save_jointype != JOIN_UNIQUE_OUTER &&
+ save_jointype != JOIN_FULL &&
+ save_jointype != JOIN_RIGHT &&
+ outerrel->partial_pathlist != NIL &&
+ bms_is_empty(joinrel->lateral_relids))
+ {
+ cheapest_partial_outer = (Path *) linitial(outerrel->partial_pathlist);
+
+ if (inner_path->parallel_safe)
+ cheapest_safe_inner = inner_path;
+ else if (save_jointype != JOIN_UNIQUE_INNER)
+ cheapest_safe_inner =
+ get_cheapest_parallel_safe_total_inner(innerrel->pathlist);
+ }
+
+ /*
+ * Each possible ordering of the available mergejoin clauses will generate
+ * a differently-sorted result path at essentially the same cost. We have
+ * no basis for choosing one over another at this level of joining, but
+ * some sort orders may be more useful than others for higher-level
+ * mergejoins, so it's worth considering multiple orderings.
+ *
+ * Actually, it's not quite true that every mergeclause ordering will
+ * generate a different path order, because some of the clauses may be
+ * partially redundant (refer to the same EquivalenceClasses). Therefore,
+ * what we do is convert the mergeclause list to a list of canonical
+ * pathkeys, and then consider different orderings of the pathkeys.
+ *
+ * Generating a path for *every* permutation of the pathkeys doesn't seem
+ * like a winning strategy; the cost in planning time is too high. For
+ * now, we generate one path for each pathkey, listing that pathkey first
+ * and the rest in random order. This should allow at least a one-clause
+ * mergejoin without re-sorting against any other possible mergejoin
+ * partner path. But if we've not guessed the right ordering of secondary
+ * keys, we may end up evaluating clauses as qpquals when they could have
+ * been done as mergeclauses. (In practice, it's rare that there's more
+ * than two or three mergeclauses, so expending a huge amount of thought
+ * on that is probably not worth it.)
+ *
+ * The pathkey order returned by select_outer_pathkeys_for_merge() has
+ * some heuristics behind it (see that function), so be sure to try it
+ * exactly as-is as well as making variants.
+ */
+ all_pathkeys = select_outer_pathkeys_for_merge(root,
+ extra->mergeclause_list,
+ joinrel);
+
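+	/*
+	 * For example, if the mergeclauses yield the pathkeys (a, b, c), the
+	 * loop below tries the outer sort orders (a, b, c), (b, a, c), and
+	 * (c, a, b): each pathkey gets a turn in front while the remaining keys
+	 * keep their original relative order.
+	 */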
+ foreach(l, all_pathkeys)
+ {
+ PathKey *front_pathkey = (PathKey *) lfirst(l);
+ List *cur_mergeclauses;
+ List *outerkeys;
+ List *innerkeys;
+ List *merge_pathkeys;
+
+ /* Make a pathkey list with this guy first */
+ if (l != list_head(all_pathkeys))
+ outerkeys = lcons(front_pathkey,
+ list_delete_nth_cell(list_copy(all_pathkeys),
+ foreach_current_index(l)));
+ else
+ outerkeys = all_pathkeys; /* no work at first one... */
+
+ /* Sort the mergeclauses into the corresponding ordering */
+ cur_mergeclauses =
+ find_mergeclauses_for_outer_pathkeys(root,
+ outerkeys,
+ extra->mergeclause_list);
+
+ /* Should have used them all... */
+ Assert(list_length(cur_mergeclauses) == list_length(extra->mergeclause_list));
+
+ /* Build sort pathkeys for the inner side */
+ innerkeys = make_inner_pathkeys_for_merge(root,
+ cur_mergeclauses,
+ outerkeys);
+
+ /* Build pathkeys representing output sort order */
+ merge_pathkeys = build_join_pathkeys(root, joinrel, jointype,
+ outerkeys);
+
+ /*
+ * And now we can make the path.
+ *
+ * Note: it's possible that the cheapest paths will already be sorted
+ * properly. try_mergejoin_path will detect that case and suppress an
+ * explicit sort step, so we needn't do so here.
+ */
+ try_mergejoin_path(root,
+ joinrel,
+ outer_path,
+ inner_path,
+ merge_pathkeys,
+ cur_mergeclauses,
+ outerkeys,
+ innerkeys,
+ jointype,
+ extra,
+ false);
+
+ /*
+		 * If we have a partial outer path and a parallel-safe inner path,
+		 * try a partial mergejoin path as well.
+ */
+ if (cheapest_partial_outer && cheapest_safe_inner)
+ try_partial_mergejoin_path(root,
+ joinrel,
+ cheapest_partial_outer,
+ cheapest_safe_inner,
+ merge_pathkeys,
+ cur_mergeclauses,
+ outerkeys,
+ innerkeys,
+ jointype,
+ extra);
+ }
+}
+
+/*
+ * generate_mergejoin_paths
+ * Creates possible mergejoin paths for input outerpath.
+ *
+ * We generate mergejoins if mergejoin clauses are available. We have
+ * two ways to generate the inner path for a mergejoin: sort the cheapest
+ * inner path, or use an inner path that is already suitably ordered for the
+ * merge. If we have several mergeclauses, it could be that there is no inner
+ * path (or only a very expensive one) for the full list of mergeclauses, but
+ * better paths exist if we truncate the mergeclause list (thereby discarding
+ * some sort key requirements). So, we consider truncations of the
+ * mergeclause list as well as the full list. (Ideally we'd consider all
+ * subsets of the mergeclause list, but that seems way too expensive.)
+ */
+static void
+generate_mergejoin_paths(PlannerInfo *root,
+ RelOptInfo *joinrel,
+ RelOptInfo *innerrel,
+ Path *outerpath,
+ JoinType jointype,
+ JoinPathExtraData *extra,
+ bool useallclauses,
+ Path *inner_cheapest_total,
+ List *merge_pathkeys,
+ bool is_partial)
+{
+ List *mergeclauses;
+ List *innersortkeys;
+ List *trialsortkeys;
+ Path *cheapest_startup_inner;
+ Path *cheapest_total_inner;
+ JoinType save_jointype = jointype;
+ int num_sortkeys;
+ int sortkeycnt;
+
+ if (jointype == JOIN_UNIQUE_OUTER || jointype == JOIN_UNIQUE_INNER)
+ jointype = JOIN_INNER;
+
+ /* Look for useful mergeclauses (if any) */
+ mergeclauses =
+ find_mergeclauses_for_outer_pathkeys(root,
+ outerpath->pathkeys,
+ extra->mergeclause_list);
+
+ /*
+ * Done with this outer path if no chance for a mergejoin.
+ *
+ * Special corner case: for "x FULL JOIN y ON true", there will be no join
+ * clauses at all. Ordinarily we'd generate a clauseless nestloop path,
+ * but since mergejoin is our only join type that supports FULL JOIN
+ * without any join clauses, it's necessary to generate a clauseless
+ * mergejoin path instead.
+ */
+ if (mergeclauses == NIL)
+ {
+ if (jointype == JOIN_FULL)
+ /* okay to try for mergejoin */ ;
+ else
+ return;
+ }
+ if (useallclauses &&
+ list_length(mergeclauses) != list_length(extra->mergeclause_list))
+ return;
+
+ /* Compute the required ordering of the inner path */
+ innersortkeys = make_inner_pathkeys_for_merge(root,
+ mergeclauses,
+ outerpath->pathkeys);
+
+ /*
+ * Generate a mergejoin on the basis of sorting the cheapest inner. Since
+ * a sort will be needed, only cheapest total cost matters. (But
+ * try_mergejoin_path will do the right thing if inner_cheapest_total is
+ * already correctly sorted.)
+ */
+ try_mergejoin_path(root,
+ joinrel,
+ outerpath,
+ inner_cheapest_total,
+ merge_pathkeys,
+ mergeclauses,
+ NIL,
+ innersortkeys,
+ jointype,
+ extra,
+ is_partial);
+
+ /* Can't do anything else if inner path needs to be unique'd */
+ if (save_jointype == JOIN_UNIQUE_INNER)
+ return;
+
+ /*
+ * Look for presorted inner paths that satisfy the innersortkey list ---
+ * or any truncation thereof, if we are allowed to build a mergejoin using
+ * a subset of the merge clauses. Here, we consider both cheap startup
+ * cost and cheap total cost.
+ *
+ * Currently we do not consider parameterized inner paths here. This
+ * interacts with decisions elsewhere that also discriminate against
+ * mergejoins with parameterized inputs; see comments in
+ * src/backend/optimizer/README.
+ *
+ * As we shorten the sortkey list, we should consider only paths that are
+ * strictly cheaper than (in particular, not the same as) any path found
+ * in an earlier iteration. Otherwise we'd be intentionally using fewer
+ * merge keys than a given path allows (treating the rest as plain
+ * joinquals), which is unlikely to be a good idea. Also, eliminating
+ * paths here on the basis of compare_path_costs is a lot cheaper than
+ * building the mergejoin path only to throw it away.
+ *
+ * If inner_cheapest_total is well enough sorted to have not required a
+ * sort in the path made above, we shouldn't make a duplicate path with
+ * it, either. We handle that case with the same logic that handles the
+ * previous consideration, by initializing the variables that track
+ * cheapest-so-far properly. Note that we do NOT reject
+ * inner_cheapest_total if we find it matches some shorter set of
+ * pathkeys. That case corresponds to using fewer mergekeys to avoid
+ * sorting inner_cheapest_total, whereas we did sort it above, so the
+ * plans being considered are different.
+ */
+ if (pathkeys_contained_in(innersortkeys,
+ inner_cheapest_total->pathkeys))
+ {
+ /* inner_cheapest_total didn't require a sort */
+ cheapest_startup_inner = inner_cheapest_total;
+ cheapest_total_inner = inner_cheapest_total;
+ }
+ else
+ {
+ /* it did require a sort, at least for the full set of keys */
+ cheapest_startup_inner = NULL;
+ cheapest_total_inner = NULL;
+ }
+ num_sortkeys = list_length(innersortkeys);
+ if (num_sortkeys > 1 && !useallclauses)
+ trialsortkeys = list_copy(innersortkeys); /* need modifiable copy */
+ else
+ trialsortkeys = innersortkeys; /* won't really truncate */
+
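+	/*
+	 * For example, with innersortkeys (a, b, c) we look for presorted inner
+	 * paths matching all of (a, b, c), then just (a, b), then just (a),
+	 * accepting a shorter match only when it yields a strictly cheaper inner
+	 * path (by total or startup cost) than anything found for a longer
+	 * prefix.
+	 */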
+ for (sortkeycnt = num_sortkeys; sortkeycnt > 0; sortkeycnt--)
+ {
+ Path *innerpath;
+ List *newclauses = NIL;
+
+ /*
+ * Look for an inner path ordered well enough for the first
+ * 'sortkeycnt' innersortkeys. NB: trialsortkeys list is modified
+ * destructively, which is why we made a copy...
+ */
+ trialsortkeys = list_truncate(trialsortkeys, sortkeycnt);
+ innerpath = get_cheapest_path_for_pathkeys(innerrel->pathlist,
+ trialsortkeys,
+ NULL,
+ TOTAL_COST,
+ is_partial);
+ if (innerpath != NULL &&
+ (cheapest_total_inner == NULL ||
+ compare_path_costs(innerpath, cheapest_total_inner,
+ TOTAL_COST) < 0))
+ {
+ /* Found a cheap (or even-cheaper) sorted path */
+ /* Select the right mergeclauses, if we didn't already */
+ if (sortkeycnt < num_sortkeys)
+ {
+ newclauses =
+ trim_mergeclauses_for_inner_pathkeys(root,
+ mergeclauses,
+ trialsortkeys);
+ Assert(newclauses != NIL);
+ }
+ else
+ newclauses = mergeclauses;
+ try_mergejoin_path(root,
+ joinrel,
+ outerpath,
+ innerpath,
+ merge_pathkeys,
+ newclauses,
+ NIL,
+ NIL,
+ jointype,
+ extra,
+ is_partial);
+ cheapest_total_inner = innerpath;
+ }
+ /* Same on the basis of cheapest startup cost ... */
+ innerpath = get_cheapest_path_for_pathkeys(innerrel->pathlist,
+ trialsortkeys,
+ NULL,
+ STARTUP_COST,
+ is_partial);
+ if (innerpath != NULL &&
+ (cheapest_startup_inner == NULL ||
+ compare_path_costs(innerpath, cheapest_startup_inner,
+ STARTUP_COST) < 0))
+ {
+ /* Found a cheap (or even-cheaper) sorted path */
+ if (innerpath != cheapest_total_inner)
+ {
+ /*
+ * Avoid rebuilding clause list if we already made one; saves
+ * memory in big join trees...
+ */
+ if (newclauses == NIL)
+ {
+ if (sortkeycnt < num_sortkeys)
+ {
+ newclauses =
+ trim_mergeclauses_for_inner_pathkeys(root,
+ mergeclauses,
+ trialsortkeys);
+ Assert(newclauses != NIL);
+ }
+ else
+ newclauses = mergeclauses;
+ }
+ try_mergejoin_path(root,
+ joinrel,
+ outerpath,
+ innerpath,
+ merge_pathkeys,
+ newclauses,
+ NIL,
+ NIL,
+ jointype,
+ extra,
+ is_partial);
+ }
+ cheapest_startup_inner = innerpath;
+ }
+
+ /*
+ * Don't consider truncated sortkeys if we need all clauses.
+ */
+ if (useallclauses)
+ break;
+ }
+}
+
+/*
+ * match_unsorted_outer
+ * Creates possible join paths for processing a single join relation
+ * 'joinrel' by employing either iterative substitution or
+ * mergejoining on each of its possible outer paths (considering
+ * only outer paths that are already ordered well enough for merging).
+ *
+ * We always generate a nestloop path for each available outer path.
+ * In fact we may generate as many as five: one on the cheapest-total-cost
+ * inner path, one on the same with materialization, one on the
+ * cheapest-startup-cost inner path (if different), one on the
+ * cheapest-total inner-indexscan path (if any), and one on the
+ * cheapest-startup inner-indexscan path (if different).
+ *
+ * We also consider mergejoins if mergejoin clauses are available. See
+ * detailed comments in generate_mergejoin_paths.
+ *
+ * 'joinrel' is the join relation
+ * 'outerrel' is the outer join relation
+ * 'innerrel' is the inner join relation
+ * 'jointype' is the type of join to do
+ * 'extra' contains additional input values
+ */
+static void
+match_unsorted_outer(PlannerInfo *root,
+ RelOptInfo *joinrel,
+ RelOptInfo *outerrel,
+ RelOptInfo *innerrel,
+ JoinType jointype,
+ JoinPathExtraData *extra)
+{
+ JoinType save_jointype = jointype;
+ bool nestjoinOK;
+ bool useallclauses;
+ Path *inner_cheapest_total = innerrel->cheapest_total_path;
+ Path *matpath = NULL;
+ ListCell *lc1;
+
+ /*
+ * Nestloop only supports inner, left, semi, and anti joins. Also, if we
+ * are doing a right or full mergejoin, we must use *all* the mergeclauses
+ * as join clauses, else we will not have a valid plan. (Although these
+ * two flags are currently inverses, keep them separate for clarity and
+ * possible future changes.)
+ */
+ switch (jointype)
+ {
+ case JOIN_INNER:
+ case JOIN_LEFT:
+ case JOIN_SEMI:
+ case JOIN_ANTI:
+ nestjoinOK = true;
+ useallclauses = false;
+ break;
+ case JOIN_RIGHT:
+ case JOIN_FULL:
+ nestjoinOK = false;
+ useallclauses = true;
+ break;
+ case JOIN_UNIQUE_OUTER:
+ case JOIN_UNIQUE_INNER:
+ jointype = JOIN_INNER;
+ nestjoinOK = true;
+ useallclauses = false;
+ break;
+ default:
+ elog(ERROR, "unrecognized join type: %d",
+ (int) jointype);
+ nestjoinOK = false; /* keep compiler quiet */
+ useallclauses = false;
+ break;
+ }
+
+ /*
+ * If inner_cheapest_total is parameterized by the outer rel, ignore it;
+ * we will consider it below as a member of cheapest_parameterized_paths,
+ * but the other possibilities considered in this routine aren't usable.
+ */
+ if (PATH_PARAM_BY_REL(inner_cheapest_total, outerrel))
+ inner_cheapest_total = NULL;
+
+ /*
+ * If we need to unique-ify the inner path, we will consider only the
+ * cheapest-total inner.
+ */
+ if (save_jointype == JOIN_UNIQUE_INNER)
+ {
+ /* No way to do this with an inner path parameterized by outer rel */
+ if (inner_cheapest_total == NULL)
+ return;
+ inner_cheapest_total = (Path *)
+ create_unique_path(root, innerrel, inner_cheapest_total, extra->sjinfo);
+ Assert(inner_cheapest_total);
+ }
+ else if (nestjoinOK)
+ {
+ /*
+ * Consider materializing the cheapest inner path, unless
+ * enable_material is off or the path in question materializes its
+ * output anyway.
+ */
+ if (enable_material && inner_cheapest_total != NULL &&
+ !ExecMaterializesOutput(inner_cheapest_total->pathtype))
+ matpath = (Path *)
+ create_material_path(innerrel, inner_cheapest_total);
+ }
+
+ foreach(lc1, outerrel->pathlist)
+ {
+ Path *outerpath = (Path *) lfirst(lc1);
+ List *merge_pathkeys;
+
+ /*
+ * We cannot use an outer path that is parameterized by the inner rel.
+ */
+ if (PATH_PARAM_BY_REL(outerpath, innerrel))
+ continue;
+
+ /*
+ * If we need to unique-ify the outer path, it's pointless to consider
+ * any but the cheapest outer. (XXX we don't consider parameterized
+ * outers, nor inners, for unique-ified cases. Should we?)
+ */
+ if (save_jointype == JOIN_UNIQUE_OUTER)
+ {
+ if (outerpath != outerrel->cheapest_total_path)
+ continue;
+ outerpath = (Path *) create_unique_path(root, outerrel,
+ outerpath, extra->sjinfo);
+ Assert(outerpath);
+ }
+
+ /*
+ * The result will have this sort order (even if it is implemented as
+ * a nestloop, and even if some of the mergeclauses are implemented by
+ * qpquals rather than as true mergeclauses):
+ */
+ merge_pathkeys = build_join_pathkeys(root, joinrel, jointype,
+ outerpath->pathkeys);
+
+ if (save_jointype == JOIN_UNIQUE_INNER)
+ {
+ /*
+ * Consider nestloop join, but only with the unique-ified cheapest
+ * inner path
+ */
+ try_nestloop_path(root,
+ joinrel,
+ outerpath,
+ inner_cheapest_total,
+ merge_pathkeys,
+ jointype,
+ extra);
+ }
+ else if (nestjoinOK)
+ {
+ /*
+ * Consider nestloop joins using this outer path and various
+ * available paths for the inner relation. We consider the
+ * cheapest-total paths for each available parameterization of the
+ * inner relation, including the unparameterized case.
+ */
+ ListCell *lc2;
+
+ foreach(lc2, innerrel->cheapest_parameterized_paths)
+ {
+ Path *innerpath = (Path *) lfirst(lc2);
+ Path *mpath;
+
+ try_nestloop_path(root,
+ joinrel,
+ outerpath,
+ innerpath,
+ merge_pathkeys,
+ jointype,
+ extra);
+
+ /*
+ * Try generating a memoize path and see if that makes the
+ * nested loop any cheaper.
+ */
+ mpath = get_memoize_path(root, innerrel, outerrel,
+ innerpath, outerpath, jointype,
+ extra);
+ if (mpath != NULL)
+ try_nestloop_path(root,
+ joinrel,
+ outerpath,
+ mpath,
+ merge_pathkeys,
+ jointype,
+ extra);
+ }
+
+ /* Also consider materialized form of the cheapest inner path */
+ if (matpath != NULL)
+ try_nestloop_path(root,
+ joinrel,
+ outerpath,
+ matpath,
+ merge_pathkeys,
+ jointype,
+ extra);
+ }
+
+ /* Can't do anything else if outer path needs to be unique'd */
+ if (save_jointype == JOIN_UNIQUE_OUTER)
+ continue;
+
+ /* Can't do anything else if inner rel is parameterized by outer */
+ if (inner_cheapest_total == NULL)
+ continue;
+
+ /* Generate merge join paths */
+ generate_mergejoin_paths(root, joinrel, innerrel, outerpath,
+ save_jointype, extra, useallclauses,
+ inner_cheapest_total, merge_pathkeys,
+ false);
+ }
+
+ /*
+ * Consider partial nestloop and mergejoin plan if outerrel has any
+ * partial path and the joinrel is parallel-safe. However, we can't
+ * handle JOIN_UNIQUE_OUTER, because the outer path will be partial, and
+ * therefore we won't be able to properly guarantee uniqueness. Nor can
+ * we handle joins needing lateral rels, since partial paths must not be
+ * parameterized. Similarly, we can't handle JOIN_FULL and JOIN_RIGHT,
+ * because they can produce false null extended rows.
+ */
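+	/*
+	 * (The trouble with full/right joins here: if each worker joined its own
+	 * chunk of the outer rel against the whole inner rel, an inner row whose
+	 * matches all lie in some other worker's chunk would wrongly be emitted
+	 * as null-extended, and genuinely unmatched inner rows would be emitted
+	 * once per worker.)
+	 */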
+ if (joinrel->consider_parallel &&
+ save_jointype != JOIN_UNIQUE_OUTER &&
+ save_jointype != JOIN_FULL &&
+ save_jointype != JOIN_RIGHT &&
+ outerrel->partial_pathlist != NIL &&
+ bms_is_empty(joinrel->lateral_relids))
+ {
+ if (nestjoinOK)
+ consider_parallel_nestloop(root, joinrel, outerrel, innerrel,
+ save_jointype, extra);
+
+ /*
+		 * If inner_cheapest_total is NULL or not parallel-safe, find the
+		 * cheapest parallel-safe total path.  If doing JOIN_UNIQUE_INNER, we
+		 * can't use any alternative inner path.
+ */
+ if (inner_cheapest_total == NULL ||
+ !inner_cheapest_total->parallel_safe)
+ {
+ if (save_jointype == JOIN_UNIQUE_INNER)
+ return;
+
+ inner_cheapest_total = get_cheapest_parallel_safe_total_inner(innerrel->pathlist);
+ }
+
+ if (inner_cheapest_total)
+ consider_parallel_mergejoin(root, joinrel, outerrel, innerrel,
+ save_jointype, extra,
+ inner_cheapest_total);
+ }
+}
+
+/*
+ * consider_parallel_mergejoin
+ * Try to build partial paths for a joinrel by joining a partial path
+ * for the outer relation to a complete path for the inner relation.
+ *
+ * 'joinrel' is the join relation
+ * 'outerrel' is the outer join relation
+ * 'innerrel' is the inner join relation
+ * 'jointype' is the type of join to do
+ * 'extra' contains additional input values
+ * 'inner_cheapest_total' cheapest total path for innerrel
+ */
+static void
+consider_parallel_mergejoin(PlannerInfo *root,
+ RelOptInfo *joinrel,
+ RelOptInfo *outerrel,
+ RelOptInfo *innerrel,
+ JoinType jointype,
+ JoinPathExtraData *extra,
+ Path *inner_cheapest_total)
+{
+ ListCell *lc1;
+
+ /* generate merge join path for each partial outer path */
+ foreach(lc1, outerrel->partial_pathlist)
+ {
+ Path *outerpath = (Path *) lfirst(lc1);
+ List *merge_pathkeys;
+
+ /*
+ * Figure out what useful ordering any paths we create will have.
+ */
+ merge_pathkeys = build_join_pathkeys(root, joinrel, jointype,
+ outerpath->pathkeys);
+
+ generate_mergejoin_paths(root, joinrel, innerrel, outerpath, jointype,
+ extra, false, inner_cheapest_total,
+ merge_pathkeys, true);
+ }
+}
+
+/*
+ * consider_parallel_nestloop
+ *	  Try to build partial paths for a joinrel by joining a partial path for
+ *	  the outer relation to complete (non-partial), parallel-safe paths for
+ *	  the inner relation.
+ *
+ * 'joinrel' is the join relation
+ * 'outerrel' is the outer join relation
+ * 'innerrel' is the inner join relation
+ * 'jointype' is the type of join to do
+ * 'extra' contains additional input values
+ */
+static void
+consider_parallel_nestloop(PlannerInfo *root,
+ RelOptInfo *joinrel,
+ RelOptInfo *outerrel,
+ RelOptInfo *innerrel,
+ JoinType jointype,
+ JoinPathExtraData *extra)
+{
+ JoinType save_jointype = jointype;
+ ListCell *lc1;
+
+ if (jointype == JOIN_UNIQUE_INNER)
+ jointype = JOIN_INNER;
+
+ foreach(lc1, outerrel->partial_pathlist)
+ {
+ Path *outerpath = (Path *) lfirst(lc1);
+ List *pathkeys;
+ ListCell *lc2;
+
+ /* Figure out what useful ordering any paths we create will have. */
+ pathkeys = build_join_pathkeys(root, joinrel, jointype,
+ outerpath->pathkeys);
+
+ /*
+ * Try the cheapest parameterized paths; only those which will produce
+ * an unparameterized path when joined to this outerrel will survive
+ * try_partial_nestloop_path. The cheapest unparameterized path is
+ * also in this list.
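+		 * For instance, an inner index path parameterized only by this outer
+		 * rel becomes unparameterized once joined to outerpath and survives;
+		 * a path parameterized by some third rel does not.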
+ */
+ foreach(lc2, innerrel->cheapest_parameterized_paths)
+ {
+ Path *innerpath = (Path *) lfirst(lc2);
+ Path *mpath;
+
+ /* Can't join to an inner path that is not parallel-safe */
+ if (!innerpath->parallel_safe)
+ continue;
+
+ /*
+ * If we're doing JOIN_UNIQUE_INNER, we can only use the inner's
+ * cheapest_total_path, and we have to unique-ify it. (We might
+ * be able to relax this to allow other safe, unparameterized
+ * inner paths, but right now create_unique_path is not on board
+ * with that.)
+ */
+ if (save_jointype == JOIN_UNIQUE_INNER)
+ {
+ if (innerpath != innerrel->cheapest_total_path)
+ continue;
+ innerpath = (Path *) create_unique_path(root, innerrel,
+ innerpath,
+ extra->sjinfo);
+ Assert(innerpath);
+ }
+
+ try_partial_nestloop_path(root, joinrel, outerpath, innerpath,
+ pathkeys, jointype, extra);
+
+ /*
+ * Try generating a memoize path and see if that makes the nested
+ * loop any cheaper.
+ */
+ mpath = get_memoize_path(root, innerrel, outerrel,
+ innerpath, outerpath, jointype,
+ extra);
+ if (mpath != NULL)
+ try_partial_nestloop_path(root, joinrel, outerpath, mpath,
+ pathkeys, jointype, extra);
+ }
+ }
+}
+
+/*
+ * hash_inner_and_outer
+ * Create hashjoin join paths by explicitly hashing both the outer and
+ * inner keys of each available hash clause.
+ *
+ * 'joinrel' is the join relation
+ * 'outerrel' is the outer join relation
+ * 'innerrel' is the inner join relation
+ * 'jointype' is the type of join to do
+ * 'extra' contains additional input values
+ */
+static void
+hash_inner_and_outer(PlannerInfo *root,
+ RelOptInfo *joinrel,
+ RelOptInfo *outerrel,
+ RelOptInfo *innerrel,
+ JoinType jointype,
+ JoinPathExtraData *extra)
+{
+ JoinType save_jointype = jointype;
+ bool isouterjoin = IS_OUTER_JOIN(jointype);
+ List *hashclauses;
+ ListCell *l;
+
+ /*
+ * We need to build only one hashclauses list for any given pair of outer
+ * and inner relations; all of the hashable clauses will be used as keys.
+ *
+ * Scan the join's restrictinfo list to find hashjoinable clauses that are
+ * usable with this pair of sub-relations.
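+	 * For example, a clause such as outer.x = inner.y whose operator belongs
+	 * to a hash operator family is hashjoinable, while an inequality such as
+	 * outer.x < inner.y is not.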
+ */
+ hashclauses = NIL;
+ foreach(l, extra->restrictlist)
+ {
+ RestrictInfo *restrictinfo = (RestrictInfo *) lfirst(l);
+
+ /*
+ * If processing an outer join, only use its own join clauses for
+ * hashing. For inner joins we need not be so picky.
+ */
+ if (isouterjoin && RINFO_IS_PUSHED_DOWN(restrictinfo, joinrel->relids))
+ continue;
+
+ if (!restrictinfo->can_join ||
+ restrictinfo->hashjoinoperator == InvalidOid)
+ continue; /* not hashjoinable */
+
+ /*
+ * Check if clause has the form "outer op inner" or "inner op outer".
+ */
+ if (!clause_sides_match_join(restrictinfo, outerrel, innerrel))
+ continue; /* no good for these input relations */
+
+ hashclauses = lappend(hashclauses, restrictinfo);
+ }
+
+ /* If we found any usable hashclauses, make paths */
+ if (hashclauses)
+ {
+ /*
+ * We consider both the cheapest-total-cost and cheapest-startup-cost
+ * outer paths. There's no need to consider any but the
+ * cheapest-total-cost inner path, however.
+ */
+ Path *cheapest_startup_outer = outerrel->cheapest_startup_path;
+ Path *cheapest_total_outer = outerrel->cheapest_total_path;
+ Path *cheapest_total_inner = innerrel->cheapest_total_path;
+
+ /*
+ * If either cheapest-total path is parameterized by the other rel, we
+ * can't use a hashjoin. (There's no use looking for alternative
+ * input paths, since these should already be the least-parameterized
+ * available paths.)
+ */
+ if (PATH_PARAM_BY_REL(cheapest_total_outer, innerrel) ||
+ PATH_PARAM_BY_REL(cheapest_total_inner, outerrel))
+ return;
+
+ /* Unique-ify if need be; we ignore parameterized possibilities */
+ if (jointype == JOIN_UNIQUE_OUTER)
+ {
+ cheapest_total_outer = (Path *)
+ create_unique_path(root, outerrel,
+ cheapest_total_outer, extra->sjinfo);
+ Assert(cheapest_total_outer);
+ jointype = JOIN_INNER;
+ try_hashjoin_path(root,
+ joinrel,
+ cheapest_total_outer,
+ cheapest_total_inner,
+ hashclauses,
+ jointype,
+ extra);
+ /* no possibility of cheap startup here */
+ }
+ else if (jointype == JOIN_UNIQUE_INNER)
+ {
+ cheapest_total_inner = (Path *)
+ create_unique_path(root, innerrel,
+ cheapest_total_inner, extra->sjinfo);
+ Assert(cheapest_total_inner);
+ jointype = JOIN_INNER;
+ try_hashjoin_path(root,
+ joinrel,
+ cheapest_total_outer,
+ cheapest_total_inner,
+ hashclauses,
+ jointype,
+ extra);
+ if (cheapest_startup_outer != NULL &&
+ cheapest_startup_outer != cheapest_total_outer)
+ try_hashjoin_path(root,
+ joinrel,
+ cheapest_startup_outer,
+ cheapest_total_inner,
+ hashclauses,
+ jointype,
+ extra);
+ }
+ else
+ {
+ /*
+ * For other jointypes, we consider the cheapest startup outer
+ * together with the cheapest total inner, and then consider
+ * pairings of cheapest-total paths including parameterized ones.
+ * There is no use in generating parameterized paths on the basis
+ * of possibly cheap startup cost, so this is sufficient.
+ */
+ ListCell *lc1;
+ ListCell *lc2;
+
+ if (cheapest_startup_outer != NULL)
+ try_hashjoin_path(root,
+ joinrel,
+ cheapest_startup_outer,
+ cheapest_total_inner,
+ hashclauses,
+ jointype,
+ extra);
+
+ foreach(lc1, outerrel->cheapest_parameterized_paths)
+ {
+ Path *outerpath = (Path *) lfirst(lc1);
+
+ /*
+ * We cannot use an outer path that is parameterized by the
+ * inner rel.
+ */
+ if (PATH_PARAM_BY_REL(outerpath, innerrel))
+ continue;
+
+ foreach(lc2, innerrel->cheapest_parameterized_paths)
+ {
+ Path *innerpath = (Path *) lfirst(lc2);
+
+ /*
+ * We cannot use an inner path that is parameterized by
+ * the outer rel, either.
+ */
+ if (PATH_PARAM_BY_REL(innerpath, outerrel))
+ continue;
+
+ if (outerpath == cheapest_startup_outer &&
+ innerpath == cheapest_total_inner)
+ continue; /* already tried it */
+
+ try_hashjoin_path(root,
+ joinrel,
+ outerpath,
+ innerpath,
+ hashclauses,
+ jointype,
+ extra);
+ }
+ }
+ }
+
+ /*
+ * If the joinrel is parallel-safe, we may be able to consider a
+ * partial hash join. However, we can't handle JOIN_UNIQUE_OUTER,
+ * because the outer path will be partial, and therefore we won't be
+ * able to properly guarantee uniqueness. Similarly, we can't handle
+ * JOIN_FULL and JOIN_RIGHT, because they can produce false null
+ * extended rows. Also, the resulting path must not be parameterized.
+ * We would be able to support JOIN_FULL and JOIN_RIGHT for Parallel
+ * Hash, since in that case we're back to a single hash table with a
+ * single set of match bits for each batch, but that will require
+ * figuring out a deadlock-free way to wait for the probe to finish.
+ */
+ if (joinrel->consider_parallel &&
+ save_jointype != JOIN_UNIQUE_OUTER &&
+ save_jointype != JOIN_FULL &&
+ save_jointype != JOIN_RIGHT &&
+ outerrel->partial_pathlist != NIL &&
+ bms_is_empty(joinrel->lateral_relids))
+ {
+ Path *cheapest_partial_outer;
+ Path *cheapest_partial_inner = NULL;
+ Path *cheapest_safe_inner = NULL;
+
+ cheapest_partial_outer =
+ (Path *) linitial(outerrel->partial_pathlist);
+
+ /*
+ * Can we use a partial inner plan too, so that we can build a
+ * shared hash table in parallel? We can't handle
+ * JOIN_UNIQUE_INNER because we can't guarantee uniqueness.
+ */
+ if (innerrel->partial_pathlist != NIL &&
+ save_jointype != JOIN_UNIQUE_INNER &&
+ enable_parallel_hash)
+ {
+ cheapest_partial_inner =
+ (Path *) linitial(innerrel->partial_pathlist);
+ try_partial_hashjoin_path(root, joinrel,
+ cheapest_partial_outer,
+ cheapest_partial_inner,
+ hashclauses, jointype, extra,
+ true /* parallel_hash */ );
+ }
+
+ /*
+ * Normally, given that the joinrel is parallel-safe, the cheapest
+ * total inner path will also be parallel-safe, but if not, we'll
+ * have to search for the cheapest safe, unparameterized inner
+ * path. If doing JOIN_UNIQUE_INNER, we can't use any alternative
+ * inner path.
+ */
+ if (cheapest_total_inner->parallel_safe)
+ cheapest_safe_inner = cheapest_total_inner;
+ else if (save_jointype != JOIN_UNIQUE_INNER)
+ cheapest_safe_inner =
+ get_cheapest_parallel_safe_total_inner(innerrel->pathlist);
+
+ if (cheapest_safe_inner != NULL)
+ try_partial_hashjoin_path(root, joinrel,
+ cheapest_partial_outer,
+ cheapest_safe_inner,
+ hashclauses, jointype, extra,
+ false /* parallel_hash */ );
+ }
+ }
+}
+
+/*
+ * select_mergejoin_clauses
+ * Select mergejoin clauses that are usable for a particular join.
+ * Returns a list of RestrictInfo nodes for those clauses.
+ *
+ * *mergejoin_allowed is normally set to true, but it is set to false if
+ * this is a right/full join and there are nonmergejoinable join clauses.
+ * The executor's mergejoin machinery cannot handle such cases, so we have
+ * to avoid generating a mergejoin plan. (Note that this flag does NOT
+ * consider whether there are actually any mergejoinable clauses. This is
+ * correct because in some cases we need to build a clauseless mergejoin.
+ * Simply returning NIL is therefore not enough to distinguish safe from
+ * unsafe cases.)
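+ *
+ * For example, a FULL JOIN whose ON condition includes a non-mergejoinable
+ * qual such as a.x > b.y causes *mergejoin_allowed to be reported false,
+ * even if the condition also contains mergejoinable equality clauses.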
+ *
+ * We also mark each selected RestrictInfo to show which side is currently
+ * being considered as outer. These are transient markings that are only
+ * good for the duration of the current add_paths_to_joinrel() call!
+ *
+ * We examine each restrictinfo clause known for the join to see
+ * if it is mergejoinable and involves vars from the two sub-relations
+ * currently of interest.
+ */
+static List *
+select_mergejoin_clauses(PlannerInfo *root,
+ RelOptInfo *joinrel,
+ RelOptInfo *outerrel,
+ RelOptInfo *innerrel,
+ List *restrictlist,
+ JoinType jointype,
+ bool *mergejoin_allowed)
+{
+ List *result_list = NIL;
+ bool isouterjoin = IS_OUTER_JOIN(jointype);
+ bool have_nonmergeable_joinclause = false;
+ ListCell *l;
+
+ foreach(l, restrictlist)
+ {
+ RestrictInfo *restrictinfo = (RestrictInfo *) lfirst(l);
+
+ /*
+ * If processing an outer join, only use its own join clauses in the
+ * merge. For inner joins we can use pushed-down clauses too. (Note:
+ * we don't set have_nonmergeable_joinclause here because pushed-down
+ * clauses will become otherquals not joinquals.)
+ */
+ if (isouterjoin && RINFO_IS_PUSHED_DOWN(restrictinfo, joinrel->relids))
+ continue;
+
+ /* Check that clause is a mergeable operator clause */
+ if (!restrictinfo->can_join ||
+ restrictinfo->mergeopfamilies == NIL)
+ {
+ /*
+ * The executor can handle extra joinquals that are constants, but
+ * not anything else, when doing right/full merge join. (The
+ * reason to support constants is so we can do FULL JOIN ON
+ * FALSE.)
+ */
+ if (!restrictinfo->clause || !IsA(restrictinfo->clause, Const))
+ have_nonmergeable_joinclause = true;
+ continue; /* not mergejoinable */
+ }
+
+ /*
+ * Check if clause has the form "outer op inner" or "inner op outer".
+ */
+ if (!clause_sides_match_join(restrictinfo, outerrel, innerrel))
+ {
+ have_nonmergeable_joinclause = true;
+ continue; /* no good for these input relations */
+ }
+
+ /*
+ * Insist that each side have a non-redundant eclass. This
+ * restriction is needed because various bits of the planner expect
+ * that each clause in a merge be associable with some pathkey in a
+ * canonical pathkey list, but redundant eclasses can't appear in
+ * canonical sort orderings. (XXX it might be worth relaxing this,
+ * but not enough time to address it for 8.3.)
+ *
+ * Note: it would be bad if this condition failed for an otherwise
+ * mergejoinable FULL JOIN clause, since that would result in
+ * undesirable planner failure. I believe that is not possible
+ * however; a variable involved in a full join could only appear in
+ * below_outer_join eclasses, which aren't considered redundant.
+ *
+ * This case *can* happen for left/right join clauses: the outer-side
+ * variable could be equated to a constant. Because we will propagate
+ * that constant across the join clause, the loss of ability to do a
+ * mergejoin is not really all that big a deal, and so it's not clear
+ * that improving this is important.
+ */
+ update_mergeclause_eclasses(root, restrictinfo);
+
+ if (EC_MUST_BE_REDUNDANT(restrictinfo->left_ec) ||
+ EC_MUST_BE_REDUNDANT(restrictinfo->right_ec))
+ {
+ have_nonmergeable_joinclause = true;
+ continue; /* can't handle redundant eclasses */
+ }
+
+ result_list = lappend(result_list, restrictinfo);
+ }
+
+ /*
+ * Report whether mergejoin is allowed (see comment at top of function).
+ */
+ switch (jointype)
+ {
+ case JOIN_RIGHT:
+ case JOIN_FULL:
+ *mergejoin_allowed = !have_nonmergeable_joinclause;
+ break;
+ default:
+ *mergejoin_allowed = true;
+ break;
+ }
+
+ return result_list;
+}
diff --git a/src/backend/optimizer/path/joinrels.c b/src/backend/optimizer/path/joinrels.c
new file mode 100644
index 0000000..9da3ff2
--- /dev/null
+++ b/src/backend/optimizer/path/joinrels.c
@@ -0,0 +1,1783 @@
+/*-------------------------------------------------------------------------
+ *
+ * joinrels.c
+ * Routines to determine which relations should be joined
+ *
+ * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ * src/backend/optimizer/path/joinrels.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "miscadmin.h"
+#include "optimizer/appendinfo.h"
+#include "optimizer/joininfo.h"
+#include "optimizer/pathnode.h"
+#include "optimizer/paths.h"
+#include "partitioning/partbounds.h"
+#include "utils/memutils.h"
+
+
+static void make_rels_by_clause_joins(PlannerInfo *root,
+ RelOptInfo *old_rel,
+ List *other_rels_list,
+ ListCell *other_rels);
+static void make_rels_by_clauseless_joins(PlannerInfo *root,
+ RelOptInfo *old_rel,
+ List *other_rels);
+static bool has_join_restriction(PlannerInfo *root, RelOptInfo *rel);
+static bool has_legal_joinclause(PlannerInfo *root, RelOptInfo *rel);
+static bool restriction_is_constant_false(List *restrictlist,
+ RelOptInfo *joinrel,
+ bool only_pushed_down);
+static void populate_joinrel_with_paths(PlannerInfo *root, RelOptInfo *rel1,
+ RelOptInfo *rel2, RelOptInfo *joinrel,
+ SpecialJoinInfo *sjinfo, List *restrictlist);
+static void try_partitionwise_join(PlannerInfo *root, RelOptInfo *rel1,
+ RelOptInfo *rel2, RelOptInfo *joinrel,
+ SpecialJoinInfo *parent_sjinfo,
+ List *parent_restrictlist);
+static SpecialJoinInfo *build_child_join_sjinfo(PlannerInfo *root,
+ SpecialJoinInfo *parent_sjinfo,
+ Relids left_relids, Relids right_relids);
+static void compute_partition_bounds(PlannerInfo *root, RelOptInfo *rel1,
+ RelOptInfo *rel2, RelOptInfo *joinrel,
+ SpecialJoinInfo *parent_sjinfo,
+ List **parts1, List **parts2);
+static void get_matching_part_pairs(PlannerInfo *root, RelOptInfo *joinrel,
+ RelOptInfo *rel1, RelOptInfo *rel2,
+ List **parts1, List **parts2);
+
+
+/*
+ * join_search_one_level
+ * Consider ways to produce join relations containing exactly 'level'
+ * jointree items. (This is one step of the dynamic-programming method
+ * embodied in standard_join_search.) Join rel nodes for each feasible
+ * combination of lower-level rels are created and returned in a list.
+ * Implementation paths are created for each such joinrel, too.
+ *
+ * level: level of rels we want to make this time
+ * root->join_rel_level[j], 1 <= j < level, is a list of rels containing j items
+ *
+ * The result is returned in root->join_rel_level[level].
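+ *
+ * For example, at level 3 with initial rels A, B, C and D, each two-item
+ * joinrel such as {A B} is joined to each remaining initial rel it has a
+ * join clause (or join-order restriction) with; "bushy" joins of two
+ * two-item rels first become possible at level 4.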
+ */
+void
+join_search_one_level(PlannerInfo *root, int level)
+{
+ List **joinrels = root->join_rel_level;
+ ListCell *r;
+ int k;
+
+ Assert(joinrels[level] == NIL);
+
+ /* Set join_cur_level so that new joinrels are added to proper list */
+ root->join_cur_level = level;
+
+ /*
+ * First, consider left-sided and right-sided plans, in which rels of
+ * exactly level-1 member relations are joined against initial relations.
+ * We prefer to join using join clauses, but if we find a rel of level-1
+ * members that has no join clauses, we will generate Cartesian-product
+ * joins against all initial rels not already contained in it.
+ */
+ foreach(r, joinrels[level - 1])
+ {
+ RelOptInfo *old_rel = (RelOptInfo *) lfirst(r);
+
+ if (old_rel->joininfo != NIL || old_rel->has_eclass_joins ||
+ has_join_restriction(root, old_rel))
+ {
+ /*
+ * There are join clauses or join order restrictions relevant to
+ * this rel, so consider joins between this rel and (only) those
+ * initial rels it is linked to by a clause or restriction.
+ *
+ * At level 2 this condition is symmetric, so there is no need to
+ * look at initial rels before this one in the list; we already
+ * considered such joins when we were at the earlier rel. (The
+ * mirror-image joins are handled automatically by make_join_rel.)
+ * In later passes (level > 2), we join rels of the previous level
+ * to each initial rel they don't already include but have a join
+ * clause or restriction with.
+ */
+ List *other_rels_list;
+ ListCell *other_rels;
+
+ if (level == 2) /* consider remaining initial rels */
+ {
+ other_rels_list = joinrels[level - 1];
+ other_rels = lnext(other_rels_list, r);
+ }
+ else /* consider all initial rels */
+ {
+ other_rels_list = joinrels[1];
+ other_rels = list_head(other_rels_list);
+ }
+
+ make_rels_by_clause_joins(root,
+ old_rel,
+ other_rels_list,
+ other_rels);
+ }
+ else
+ {
+ /*
+ * Oops, we have a relation that is not joined to any other
+ * relation, either directly or by join-order restrictions.
+ * Cartesian product time.
+ *
+ * We consider a cartesian product with each not-already-included
+ * initial rel, whether it has other join clauses or not. At
+ * level 2, if there are two or more clauseless initial rels, we
+ * will redundantly consider joining them in both directions; but
+ * such cases aren't common enough to justify adding complexity to
+ * avoid the duplicated effort.
+ */
+ make_rels_by_clauseless_joins(root,
+ old_rel,
+ joinrels[1]);
+ }
+ }
+
+ /*
+ * Now, consider "bushy plans" in which relations of k initial rels are
+ * joined to relations of level-k initial rels, for 2 <= k <= level-2.
+ *
+ * We only consider bushy-plan joins for pairs of rels where there is a
+ * suitable join clause (or join order restriction), in order to avoid
+ * unreasonable growth of planning time.
+ */
+ for (k = 2;; k++)
+ {
+ int other_level = level - k;
+
+ /*
+ * Since make_join_rel(x, y) handles both x,y and y,x cases, we only
+ * need to go as far as the halfway point.
+ */
+ if (k > other_level)
+ break;
+
+ foreach(r, joinrels[k])
+ {
+ RelOptInfo *old_rel = (RelOptInfo *) lfirst(r);
+ List *other_rels_list;
+ ListCell *other_rels;
+ ListCell *r2;
+
+ /*
+ * We can ignore relations without join clauses here, unless they
+ * participate in join-order restrictions --- then we might have
+ * to force a bushy join plan.
+ */
+ if (old_rel->joininfo == NIL && !old_rel->has_eclass_joins &&
+ !has_join_restriction(root, old_rel))
+ continue;
+
+ if (k == other_level)
+ {
+ /* only consider remaining rels */
+ other_rels_list = joinrels[k];
+ other_rels = lnext(other_rels_list, r);
+ }
+ else
+ {
+ other_rels_list = joinrels[other_level];
+ other_rels = list_head(other_rels_list);
+ }
+
+ for_each_cell(r2, other_rels_list, other_rels)
+ {
+ RelOptInfo *new_rel = (RelOptInfo *) lfirst(r2);
+
+ if (!bms_overlap(old_rel->relids, new_rel->relids))
+ {
+ /*
+ * OK, we can build a rel of the right level from this
+ * pair of rels. Do so if there is at least one relevant
+ * join clause or join order restriction.
+ */
+ if (have_relevant_joinclause(root, old_rel, new_rel) ||
+ have_join_order_restriction(root, old_rel, new_rel))
+ {
+ (void) make_join_rel(root, old_rel, new_rel);
+ }
+ }
+ }
+ }
+ }
+
+ /*----------
+ * Last-ditch effort: if we failed to find any usable joins so far, force
+ * a set of cartesian-product joins to be generated. This handles the
+ * special case where all the available rels have join clauses but we
+ * cannot use any of those clauses yet. This can only happen when we are
+ * considering a join sub-problem (a sub-joinlist) and all the rels in the
+ * sub-problem have only join clauses with rels outside the sub-problem.
+ * An example is
+ *
+ * SELECT ... FROM a INNER JOIN b ON TRUE, c, d, ...
+ * WHERE a.w = c.x and b.y = d.z;
+ *
+ * If the "a INNER JOIN b" sub-problem does not get flattened into the
+ * upper level, we must be willing to make a cartesian join of a and b;
+ * but the code above will not have done so, because it thought that both
+ * a and b have joinclauses. We consider only left-sided and right-sided
+ * cartesian joins in this case (no bushy).
+ *----------
+ */
+ if (joinrels[level] == NIL)
+ {
+ /*
+ * This loop is just like the first one, except we always call
+ * make_rels_by_clauseless_joins().
+ */
+ foreach(r, joinrels[level - 1])
+ {
+ RelOptInfo *old_rel = (RelOptInfo *) lfirst(r);
+
+ make_rels_by_clauseless_joins(root,
+ old_rel,
+ joinrels[1]);
+ }
+
+ /*----------
+ * When special joins are involved, there may be no legal way
+ * to make an N-way join for some values of N. For example consider
+ *
+ * SELECT ... FROM t1 WHERE
+ * x IN (SELECT ... FROM t2,t3 WHERE ...) AND
+ * y IN (SELECT ... FROM t4,t5 WHERE ...)
+ *
+ * We will flatten this query to a 5-way join problem, but there are
+ * no 4-way joins that join_is_legal() will consider legal. We have
+ * to accept failure at level 4 and go on to discover a workable
+ * bushy plan at level 5.
+ *
+ * However, if there are no special joins and no lateral references
+ * then join_is_legal() should never fail, and so the following sanity
+ * check is useful.
+ *----------
+ */
+ if (joinrels[level] == NIL &&
+ root->join_info_list == NIL &&
+ !root->hasLateralRTEs)
+ elog(ERROR, "failed to build any %d-way joins", level);
+ }
+}
+
+/*
+ * make_rels_by_clause_joins
+ * Build joins between the given relation 'old_rel' and other relations
+ * that participate in join clauses that 'old_rel' also participates in
+ * (or participate in join-order restrictions with it).
+ * The join rels are returned in root->join_rel_level[join_cur_level].
+ *
+ * Note: at levels above 2 we will generate the same joined relation in
+ * multiple ways --- for example (a join b) join c is the same RelOptInfo as
+ * (b join c) join a, though the second case will add a different set of Paths
+ * to it. This is the reason for using the join_rel_level mechanism, which
+ * automatically ensures that each new joinrel is only added to the list once.
+ *
+ * 'old_rel' is the relation entry for the relation to be joined
+ * 'other_rels_list': a list containing the other
+ * rels to be considered for joining
+ * 'other_rels': the first cell to be considered
+ *
+ * Currently, this is only used with initial rels in other_rels, but it
+ * will work for joining to joinrels too.
+ */
+static void
+make_rels_by_clause_joins(PlannerInfo *root,
+ RelOptInfo *old_rel,
+ List *other_rels_list,
+ ListCell *other_rels)
+{
+ ListCell *l;
+
+ for_each_cell(l, other_rels_list, other_rels)
+ {
+ RelOptInfo *other_rel = (RelOptInfo *) lfirst(l);
+
+ if (!bms_overlap(old_rel->relids, other_rel->relids) &&
+ (have_relevant_joinclause(root, old_rel, other_rel) ||
+ have_join_order_restriction(root, old_rel, other_rel)))
+ {
+ (void) make_join_rel(root, old_rel, other_rel);
+ }
+ }
+}
+
+/*
+ * make_rels_by_clauseless_joins
+ * Given a relation 'old_rel' and a list of other relations
+ * 'other_rels', create a join relation between 'old_rel' and each
+ * member of 'other_rels' that isn't already included in 'old_rel'.
+ * The join rels are returned in root->join_rel_level[join_cur_level].
+ *
+ * 'old_rel' is the relation entry for the relation to be joined
+ * 'other_rels': a list containing the other rels to be considered for joining
+ *
+ * Currently, this is only used with initial rels in other_rels, but it would
+ * work for joining to joinrels too.
+ */
+static void
+make_rels_by_clauseless_joins(PlannerInfo *root,
+ RelOptInfo *old_rel,
+ List *other_rels)
+{
+ ListCell *l;
+
+ foreach(l, other_rels)
+ {
+ RelOptInfo *other_rel = (RelOptInfo *) lfirst(l);
+
+ if (!bms_overlap(other_rel->relids, old_rel->relids))
+ {
+ (void) make_join_rel(root, old_rel, other_rel);
+ }
+ }
+}
+
+
+/*
+ * join_is_legal
+ * Determine whether a proposed join is legal given the query's
+ * join order constraints; and if it is, determine the join type.
+ *
+ * Caller must supply not only the two rels, but the union of their relids.
+ * (We could simplify the API by computing joinrelids locally, but this
+ * would be redundant work in the normal path through make_join_rel.)
+ *
+ * On success, *sjinfo_p is set to NULL if this is to be a plain inner join,
+ * else it's set to point to the associated SpecialJoinInfo node. Also,
+ * *reversed_p is set true if the given relations need to be swapped to
+ * match the SpecialJoinInfo node.
+ */
+static bool
+join_is_legal(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2,
+ Relids joinrelids,
+ SpecialJoinInfo **sjinfo_p, bool *reversed_p)
+{
+ SpecialJoinInfo *match_sjinfo;
+ bool reversed;
+ bool unique_ified;
+ bool must_be_leftjoin;
+ ListCell *l;
+
+ /*
+ * Ensure output params are set on failure return. This is just to
+ * suppress uninitialized-variable warnings from overly anal compilers.
+ */
+ *sjinfo_p = NULL;
+ *reversed_p = false;
+
+ /*
+ * If we have any special joins, the proposed join might be illegal; and
+ * in any case we have to determine its join type. Scan the join info
+ * list for matches and conflicts.
+ */
+ match_sjinfo = NULL;
+ reversed = false;
+ unique_ified = false;
+ must_be_leftjoin = false;
+
+ foreach(l, root->join_info_list)
+ {
+ SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) lfirst(l);
+
+ /*
+ * This special join is not relevant unless its RHS overlaps the
+ * proposed join. (Check this first as a fast path for dismissing
+ * most irrelevant SJs quickly.)
+ */
+ if (!bms_overlap(sjinfo->min_righthand, joinrelids))
+ continue;
+
+ /*
+ * Also, not relevant if proposed join is fully contained within RHS
+ * (ie, we're still building up the RHS).
+ */
+ if (bms_is_subset(joinrelids, sjinfo->min_righthand))
+ continue;
+
+ /*
+ * Also, not relevant if SJ is already done within either input.
+ */
+ if (bms_is_subset(sjinfo->min_lefthand, rel1->relids) &&
+ bms_is_subset(sjinfo->min_righthand, rel1->relids))
+ continue;
+ if (bms_is_subset(sjinfo->min_lefthand, rel2->relids) &&
+ bms_is_subset(sjinfo->min_righthand, rel2->relids))
+ continue;
+
+ /*
+ * If it's a semijoin and we already joined the RHS to any other rels
+ * within either input, then we must have unique-ified the RHS at that
+ * point (see below). Therefore the semijoin is no longer relevant in
+ * this join path.
+ */
+ if (sjinfo->jointype == JOIN_SEMI)
+ {
+ if (bms_is_subset(sjinfo->syn_righthand, rel1->relids) &&
+ !bms_equal(sjinfo->syn_righthand, rel1->relids))
+ continue;
+ if (bms_is_subset(sjinfo->syn_righthand, rel2->relids) &&
+ !bms_equal(sjinfo->syn_righthand, rel2->relids))
+ continue;
+ }
+
+ /*
+ * If one input contains min_lefthand and the other contains
+ * min_righthand, then we can perform the SJ at this join.
+ *
+ * Reject if we get matches to more than one SJ; that implies we're
+ * considering something that's not really valid.
+ */
+ if (bms_is_subset(sjinfo->min_lefthand, rel1->relids) &&
+ bms_is_subset(sjinfo->min_righthand, rel2->relids))
+ {
+ if (match_sjinfo)
+ return false; /* invalid join path */
+ match_sjinfo = sjinfo;
+ reversed = false;
+ }
+ else if (bms_is_subset(sjinfo->min_lefthand, rel2->relids) &&
+ bms_is_subset(sjinfo->min_righthand, rel1->relids))
+ {
+ if (match_sjinfo)
+ return false; /* invalid join path */
+ match_sjinfo = sjinfo;
+ reversed = true;
+ }
+ else if (sjinfo->jointype == JOIN_SEMI &&
+ bms_equal(sjinfo->syn_righthand, rel2->relids) &&
+ create_unique_path(root, rel2, rel2->cheapest_total_path,
+ sjinfo) != NULL)
+ {
+ /*----------
+ * For a semijoin, we can join the RHS to anything else by
+ * unique-ifying the RHS (if the RHS can be unique-ified).
+ * We will only get here if we have the full RHS but less
+ * than min_lefthand on the LHS.
+ *
+ * The reason to consider such a join path is exemplified by
+ * SELECT ... FROM a,b WHERE (a.x,b.y) IN (SELECT c1,c2 FROM c)
+ * If we insist on doing this as a semijoin we will first have
+ * to form the cartesian product of A*B. But if we unique-ify
+ * C then the semijoin becomes a plain innerjoin and we can join
+ * in any order, eg C to A and then to B. When C is much smaller
+ * than A and B this can be a huge win. So we allow C to be
+ * joined to just A or just B here, and then make_join_rel has
+ * to handle the case properly.
+ *
+ * Note that actually we'll allow unique-ified C to be joined to
+ * some other relation D here, too. That is legal, if usually not
+ * very sane, and this routine is only concerned with legality not
+ * with whether the join is good strategy.
+ *----------
+ */
+ if (match_sjinfo)
+ return false; /* invalid join path */
+ match_sjinfo = sjinfo;
+ reversed = false;
+ unique_ified = true;
+ }
+ else if (sjinfo->jointype == JOIN_SEMI &&
+ bms_equal(sjinfo->syn_righthand, rel1->relids) &&
+ create_unique_path(root, rel1, rel1->cheapest_total_path,
+ sjinfo) != NULL)
+ {
+ /* Reversed semijoin case */
+ if (match_sjinfo)
+ return false; /* invalid join path */
+ match_sjinfo = sjinfo;
+ reversed = true;
+ unique_ified = true;
+ }
+ else
+ {
+ /*
+ * Otherwise, the proposed join overlaps the RHS but isn't a valid
+ * implementation of this SJ. But don't panic quite yet: the RHS
+ * violation might have occurred previously, in one or both input
+ * relations, in which case we must have previously decided that
+ * it was OK to commute some other SJ with this one. If we need
+ * to perform this join to finish building up the RHS, rejecting
+ * it could lead to not finding any plan at all. (This can occur
+ * because of the heuristics elsewhere in this file that postpone
+ * clauseless joins: we might not consider doing a clauseless join
+ * within the RHS until after we've performed other, validly
+ * commutable SJs with one or both sides of the clauseless join.)
+ * This consideration boils down to the rule that if both inputs
+ * overlap the RHS, we can allow the join --- they are either
+ * fully within the RHS, or represent previously-allowed joins to
+ * rels outside it.
+ */
+ if (bms_overlap(rel1->relids, sjinfo->min_righthand) &&
+ bms_overlap(rel2->relids, sjinfo->min_righthand))
+ continue; /* assume valid previous violation of RHS */
+
+ /*
+ * The proposed join could still be legal, but only if we're
+ * allowed to associate it into the RHS of this SJ. That means
+ * this SJ must be a LEFT join (not SEMI or ANTI, and certainly
+ * not FULL) and the proposed join must not overlap the LHS.
+ */
+ if (sjinfo->jointype != JOIN_LEFT ||
+ bms_overlap(joinrelids, sjinfo->min_lefthand))
+ return false; /* invalid join path */
+
+ /*
+ * To be valid, the proposed join must be a LEFT join; otherwise
+ * it can't associate into this SJ's RHS. But we may not yet have
+ * found the SpecialJoinInfo matching the proposed join, so we
+ * can't test that yet. Remember the requirement for later.
+ */
+ must_be_leftjoin = true;
+ }
+ }
+
+ /*
+ * Fail if violated any SJ's RHS and didn't match to a LEFT SJ: the
+ * proposed join can't associate into an SJ's RHS.
+ *
+ * Also, fail if the proposed join's predicate isn't strict; we're
+ * essentially checking to see if we can apply outer-join identity 3, and
+ * that's a requirement. (This check may be redundant with checks in
+ * make_outerjoininfo, but I'm not quite sure, and it's cheap to test.)
+ */
+ if (must_be_leftjoin &&
+ (match_sjinfo == NULL ||
+ match_sjinfo->jointype != JOIN_LEFT ||
+ !match_sjinfo->lhs_strict))
+ return false; /* invalid join path */
+
+ /*
+ * We also have to check for constraints imposed by LATERAL references.
+ */
+ if (root->hasLateralRTEs)
+ {
+ bool lateral_fwd;
+ bool lateral_rev;
+ Relids join_lateral_rels;
+
+ /*
+ * The proposed rels could each contain lateral references to the
+ * other, in which case the join is impossible. If there are lateral
+ * references in just one direction, then the join has to be done with
+ * a nestloop with the lateral referencer on the inside. If the join
+ * matches an SJ that cannot be implemented by such a nestloop, the
+ * join is impossible.
+ *
+ * Also, if the lateral reference is only indirect, we should reject
+ * the join; whatever rel(s) the reference chain goes through must be
+ * joined to first.
+ *
+ * Another case that might keep us from building a valid plan is the
+ * implementation restriction described by have_dangerous_phv().
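+		 *
+		 * For instance, given FROM a, LATERAL (SELECT * FROM b WHERE
+		 * b.x = a.x) ss, the subquery ss has a lateral reference to a, so an
+		 * a/ss join can only be implemented as a nestloop with a on the
+		 * outside.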
+ */
+ lateral_fwd = bms_overlap(rel1->relids, rel2->lateral_relids);
+ lateral_rev = bms_overlap(rel2->relids, rel1->lateral_relids);
+ if (lateral_fwd && lateral_rev)
+ return false; /* have lateral refs in both directions */
+ if (lateral_fwd)
+ {
+ /* has to be implemented as nestloop with rel1 on left */
+ if (match_sjinfo &&
+ (reversed ||
+ unique_ified ||
+ match_sjinfo->jointype == JOIN_FULL))
+ return false; /* not implementable as nestloop */
+ /* check there is a direct reference from rel2 to rel1 */
+ if (!bms_overlap(rel1->relids, rel2->direct_lateral_relids))
+ return false; /* only indirect refs, so reject */
+ /* check we won't have a dangerous PHV */
+ if (have_dangerous_phv(root, rel1->relids, rel2->lateral_relids))
+ return false; /* might be unable to handle required PHV */
+ }
+ else if (lateral_rev)
+ {
+ /* has to be implemented as nestloop with rel2 on left */
+ if (match_sjinfo &&
+ (!reversed ||
+ unique_ified ||
+ match_sjinfo->jointype == JOIN_FULL))
+ return false; /* not implementable as nestloop */
+ /* check there is a direct reference from rel1 to rel2 */
+ if (!bms_overlap(rel2->relids, rel1->direct_lateral_relids))
+ return false; /* only indirect refs, so reject */
+ /* check we won't have a dangerous PHV */
+ if (have_dangerous_phv(root, rel2->relids, rel1->lateral_relids))
+ return false; /* might be unable to handle required PHV */
+ }
+
+ /*
+ * LATERAL references could also cause problems later on if we accept
+ * this join: if the join's minimum parameterization includes any rels
+ * that would have to be on the inside of an outer join with this join
+ * rel, then it's never going to be possible to build the complete
+ * query using this join. We should reject this join not only because
+ * it'll save work, but because if we don't, the clauseless-join
+ * heuristics might think that legality of this join means that some
+ * other join rel need not be formed, and that could lead to failure
+ * to find any plan at all. We have to consider not only rels that
+ * are directly on the inner side of an OJ with the joinrel, but also
+ * ones that are indirectly so, so search to find all such rels.
+ */
+ join_lateral_rels = min_join_parameterization(root, joinrelids,
+ rel1, rel2);
+ if (join_lateral_rels)
+ {
+ Relids join_plus_rhs = bms_copy(joinrelids);
+ bool more;
+
+ do
+ {
+ more = false;
+ foreach(l, root->join_info_list)
+ {
+ SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) lfirst(l);
+
+ /* ignore full joins --- their ordering is predetermined */
+ if (sjinfo->jointype == JOIN_FULL)
+ continue;
+
+ if (bms_overlap(sjinfo->min_lefthand, join_plus_rhs) &&
+ !bms_is_subset(sjinfo->min_righthand, join_plus_rhs))
+ {
+ join_plus_rhs = bms_add_members(join_plus_rhs,
+ sjinfo->min_righthand);
+ more = true;
+ }
+ }
+ } while (more);
+ if (bms_overlap(join_plus_rhs, join_lateral_rels))
+ return false; /* will not be able to join to some RHS rel */
+ }
+ }
+
+ /* Otherwise, it's a valid join */
+ *sjinfo_p = match_sjinfo;
+ *reversed_p = reversed;
+ return true;
+}
+
+
+/*
+ * make_join_rel
+ * Find or create a join RelOptInfo that represents the join of
+ * the two given rels, and add to it path information for paths
+ * created with the two rels as outer and inner rel.
+ * (The join rel may already contain paths generated from other
+ * pairs of rels that add up to the same set of base rels.)
+ *
+ * NB: will return NULL if attempted join is not valid. This can happen
+ * when working with outer joins, or with IN or EXISTS clauses that have been
+ * turned into joins.
+ */
+RelOptInfo *
+make_join_rel(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2)
+{
+ Relids joinrelids;
+ SpecialJoinInfo *sjinfo;
+ bool reversed;
+ SpecialJoinInfo sjinfo_data;
+ RelOptInfo *joinrel;
+ List *restrictlist;
+
+ /* We should never try to join two overlapping sets of rels. */
+ Assert(!bms_overlap(rel1->relids, rel2->relids));
+
+ /* Construct Relids set that identifies the joinrel. */
+ joinrelids = bms_union(rel1->relids, rel2->relids);
+
+ /* Check validity and determine join type. */
+ if (!join_is_legal(root, rel1, rel2, joinrelids,
+ &sjinfo, &reversed))
+ {
+ /* invalid join path */
+ bms_free(joinrelids);
+ return NULL;
+ }
+
+ /* Swap rels if needed to match the join info. */
+ if (reversed)
+ {
+ RelOptInfo *trel = rel1;
+
+ rel1 = rel2;
+ rel2 = trel;
+ }
+
+ /*
+ * If it's a plain inner join, then we won't have found anything in
+ * join_info_list. Make up a SpecialJoinInfo so that selectivity
+ * estimation functions will know what's being joined.
+ */
+ if (sjinfo == NULL)
+ {
+ sjinfo = &sjinfo_data;
+ sjinfo->type = T_SpecialJoinInfo;
+ sjinfo->min_lefthand = rel1->relids;
+ sjinfo->min_righthand = rel2->relids;
+ sjinfo->syn_lefthand = rel1->relids;
+ sjinfo->syn_righthand = rel2->relids;
+ sjinfo->jointype = JOIN_INNER;
+ /* we don't bother trying to make the remaining fields valid */
+ sjinfo->lhs_strict = false;
+ sjinfo->delay_upper_joins = false;
+ sjinfo->semi_can_btree = false;
+ sjinfo->semi_can_hash = false;
+ sjinfo->semi_operators = NIL;
+ sjinfo->semi_rhs_exprs = NIL;
+ }
+
+ /*
+ * Find or build the join RelOptInfo, and compute the restrictlist that
+ * goes with this particular joining.
+ */
+ joinrel = build_join_rel(root, joinrelids, rel1, rel2, sjinfo,
+ &restrictlist);
+
+ /*
+ * If we've already proven this join is empty, we needn't consider any
+ * more paths for it.
+ */
+ if (is_dummy_rel(joinrel))
+ {
+ bms_free(joinrelids);
+ return joinrel;
+ }
+
+ /* Add paths to the join relation. */
+ populate_joinrel_with_paths(root, rel1, rel2, joinrel, sjinfo,
+ restrictlist);
+
+ bms_free(joinrelids);
+
+ return joinrel;
+}
+
+/*
+ * populate_joinrel_with_paths
+ *	  Add paths to the given joinrel for the given pair of joining relations.
+ *	  The SpecialJoinInfo provides details about the join, and the restrictlist
+ *	  contains the join clauses and the other clauses applicable to the given
+ *	  pair of joining relations.
+ */
+static void
+populate_joinrel_with_paths(PlannerInfo *root, RelOptInfo *rel1,
+ RelOptInfo *rel2, RelOptInfo *joinrel,
+ SpecialJoinInfo *sjinfo, List *restrictlist)
+{
+ /*
+ * Consider paths using each rel as both outer and inner. Depending on
+ * the join type, a provably empty outer or inner rel might mean the join
+ * is provably empty too; in which case throw away any previously computed
+ * paths and mark the join as dummy. (We do it this way since it's
+ * conceivable that dummy-ness of a multi-element join might only be
+ * noticeable for certain construction paths.)
+ *
+ * Also, a provably constant-false join restriction typically means that
+ * we can skip evaluating one or both sides of the join. We do this by
+ * marking the appropriate rel as dummy. For outer joins, a
+ * constant-false restriction that is pushed down still means the whole
+ * join is dummy, while a non-pushed-down one means that no inner rows
+ * will join so we can treat the inner rel as dummy.
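+	 * (For example, given "x LEFT JOIN y ON false", no y row can ever join,
+	 * so y can be treated as dummy and each x row is simply null-extended;
+	 * whereas a pushed-down constant-false WHERE qual empties the whole
+	 * join.)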
+ *
+ * We need only consider the jointypes that appear in join_info_list, plus
+ * JOIN_INNER.
+ */
+ switch (sjinfo->jointype)
+ {
+ case JOIN_INNER:
+ if (is_dummy_rel(rel1) || is_dummy_rel(rel2) ||
+ restriction_is_constant_false(restrictlist, joinrel, false))
+ {
+ mark_dummy_rel(joinrel);
+ break;
+ }
+ add_paths_to_joinrel(root, joinrel, rel1, rel2,
+ JOIN_INNER, sjinfo,
+ restrictlist);
+ add_paths_to_joinrel(root, joinrel, rel2, rel1,
+ JOIN_INNER, sjinfo,
+ restrictlist);
+ break;
+ case JOIN_LEFT:
+ if (is_dummy_rel(rel1) ||
+ restriction_is_constant_false(restrictlist, joinrel, true))
+ {
+ mark_dummy_rel(joinrel);
+ break;
+ }
+ if (restriction_is_constant_false(restrictlist, joinrel, false) &&
+ bms_is_subset(rel2->relids, sjinfo->syn_righthand))
+ mark_dummy_rel(rel2);
+ add_paths_to_joinrel(root, joinrel, rel1, rel2,
+ JOIN_LEFT, sjinfo,
+ restrictlist);
+ add_paths_to_joinrel(root, joinrel, rel2, rel1,
+ JOIN_RIGHT, sjinfo,
+ restrictlist);
+ break;
+ case JOIN_FULL:
+ if ((is_dummy_rel(rel1) && is_dummy_rel(rel2)) ||
+ restriction_is_constant_false(restrictlist, joinrel, true))
+ {
+ mark_dummy_rel(joinrel);
+ break;
+ }
+ add_paths_to_joinrel(root, joinrel, rel1, rel2,
+ JOIN_FULL, sjinfo,
+ restrictlist);
+ add_paths_to_joinrel(root, joinrel, rel2, rel1,
+ JOIN_FULL, sjinfo,
+ restrictlist);
+
+ /*
+ * If there are join quals that aren't mergeable or hashable, we
+ * may not be able to build any valid plan. Complain here so that
+ * we can give a somewhat-useful error message. (Since we have no
+ * flexibility of planning for a full join, there's no chance of
+ * succeeding later with another pair of input rels.)
+ */
+ if (joinrel->pathlist == NIL)
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("FULL JOIN is only supported with merge-joinable or hash-joinable join conditions")));
+ break;
+ case JOIN_SEMI:
+
+ /*
+ * We might have a normal semijoin, or a case where we don't have
+ * enough rels to do the semijoin but can unique-ify the RHS and
+ * then do an innerjoin (see comments in join_is_legal). In the
+ * latter case we can't apply JOIN_SEMI joining.
+ */
+ if (bms_is_subset(sjinfo->min_lefthand, rel1->relids) &&
+ bms_is_subset(sjinfo->min_righthand, rel2->relids))
+ {
+ if (is_dummy_rel(rel1) || is_dummy_rel(rel2) ||
+ restriction_is_constant_false(restrictlist, joinrel, false))
+ {
+ mark_dummy_rel(joinrel);
+ break;
+ }
+ add_paths_to_joinrel(root, joinrel, rel1, rel2,
+ JOIN_SEMI, sjinfo,
+ restrictlist);
+ }
+
+ /*
+ * If we know how to unique-ify the RHS and one input rel is
+ * exactly the RHS (not a superset) we can consider unique-ifying
+ * it and then doing a regular join. (The create_unique_path
+ * check here is probably redundant with what join_is_legal did,
+ * but if so the check is cheap because it's cached. So test
+ * anyway to be sure.)
+ */
+ if (bms_equal(sjinfo->syn_righthand, rel2->relids) &&
+ create_unique_path(root, rel2, rel2->cheapest_total_path,
+ sjinfo) != NULL)
+ {
+ if (is_dummy_rel(rel1) || is_dummy_rel(rel2) ||
+ restriction_is_constant_false(restrictlist, joinrel, false))
+ {
+ mark_dummy_rel(joinrel);
+ break;
+ }
+ add_paths_to_joinrel(root, joinrel, rel1, rel2,
+ JOIN_UNIQUE_INNER, sjinfo,
+ restrictlist);
+ add_paths_to_joinrel(root, joinrel, rel2, rel1,
+ JOIN_UNIQUE_OUTER, sjinfo,
+ restrictlist);
+ }
+ break;
+ case JOIN_ANTI:
+ if (is_dummy_rel(rel1) ||
+ restriction_is_constant_false(restrictlist, joinrel, true))
+ {
+ mark_dummy_rel(joinrel);
+ break;
+ }
+ if (restriction_is_constant_false(restrictlist, joinrel, false) &&
+ bms_is_subset(rel2->relids, sjinfo->syn_righthand))
+ mark_dummy_rel(rel2);
+ add_paths_to_joinrel(root, joinrel, rel1, rel2,
+ JOIN_ANTI, sjinfo,
+ restrictlist);
+ break;
+ default:
+ /* other values not expected here */
+ elog(ERROR, "unrecognized join type: %d", (int) sjinfo->jointype);
+ break;
+ }
+
+ /* Apply partitionwise join technique, if possible. */
+ try_partitionwise_join(root, rel1, rel2, joinrel, sjinfo, restrictlist);
+}
+
+
+/*
+ * have_join_order_restriction
+ * Detect whether the two relations should be joined to satisfy
+ * a join-order restriction arising from special or lateral joins.
+ *
+ * In practice this is always used with have_relevant_joinclause(), and so
+ * could be merged with that function, but it seems clearer to separate the
+ * two concerns. We need this test because there are degenerate cases where
+ * a clauseless join must be performed to satisfy join-order restrictions.
+ * Also, if one rel has a lateral reference to the other, or both are needed
+ * to compute some PHV, we should consider joining them even if the join would
+ * be clauseless.
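+ *
+ * For example, in
+ *		SELECT * FROM a LEFT JOIN (b CROSS JOIN c) ON a.x = b.y AND a.z = c.w
+ * b and c have no join clause linking them to each other, yet they must be
+ * joined to one another before the outer join's righthand side is complete.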
+ *
+ * Note: this is only a problem if one side of a degenerate outer join
+ * contains multiple rels, or a clauseless join is required within an
+ * IN/EXISTS RHS; else we will find a join path via the "last ditch" case in
+ * join_search_one_level(). We could dispense with this test if we were
+ * willing to try bushy plans in the "last ditch" case, but that seems much
+ * less efficient.
+ */
+bool
+have_join_order_restriction(PlannerInfo *root,
+ RelOptInfo *rel1, RelOptInfo *rel2)
+{
+ bool result = false;
+ ListCell *l;
+
+ /*
+ * If either side has a direct lateral reference to the other, attempt the
+ * join regardless of outer-join considerations.
+ */
+ if (bms_overlap(rel1->relids, rel2->direct_lateral_relids) ||
+ bms_overlap(rel2->relids, rel1->direct_lateral_relids))
+ return true;
+
+ /*
+ * Likewise, if both rels are needed to compute some PlaceHolderVar,
+ * attempt the join regardless of outer-join considerations. (This is not
+ * very desirable, because a PHV with a large eval_at set will cause a lot
+ * of probably-useless joins to be considered, but failing to do this can
+ * cause us to fail to construct a plan at all.)
+ */
+ foreach(l, root->placeholder_list)
+ {
+ PlaceHolderInfo *phinfo = (PlaceHolderInfo *) lfirst(l);
+
+ if (bms_is_subset(rel1->relids, phinfo->ph_eval_at) &&
+ bms_is_subset(rel2->relids, phinfo->ph_eval_at))
+ return true;
+ }
+
+ /*
+ * It's possible that the rels correspond to the left and right sides of a
+ * degenerate outer join, that is, one with no joinclause mentioning the
+ * non-nullable side; in which case we should force the join to occur.
+ *
+ * Also, the two rels could represent a clauseless join that has to be
+ * completed to build up the LHS or RHS of an outer join.
+ */
+ foreach(l, root->join_info_list)
+ {
+ SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) lfirst(l);
+
+ /* ignore full joins --- other mechanisms handle them */
+ if (sjinfo->jointype == JOIN_FULL)
+ continue;
+
+ /* Can we perform the SJ with these rels? */
+ if (bms_is_subset(sjinfo->min_lefthand, rel1->relids) &&
+ bms_is_subset(sjinfo->min_righthand, rel2->relids))
+ {
+ result = true;
+ break;
+ }
+ if (bms_is_subset(sjinfo->min_lefthand, rel2->relids) &&
+ bms_is_subset(sjinfo->min_righthand, rel1->relids))
+ {
+ result = true;
+ break;
+ }
+
+ /*
+ * Might we need to join these rels to complete the RHS? We have to
+ * use "overlap" tests since either rel might include a lower SJ that
+ * has been proven to commute with this one.
+ */
+ if (bms_overlap(sjinfo->min_righthand, rel1->relids) &&
+ bms_overlap(sjinfo->min_righthand, rel2->relids))
+ {
+ result = true;
+ break;
+ }
+
+ /* Likewise for the LHS. */
+ if (bms_overlap(sjinfo->min_lefthand, rel1->relids) &&
+ bms_overlap(sjinfo->min_lefthand, rel2->relids))
+ {
+ result = true;
+ break;
+ }
+ }
+
+ /*
+ * We do not force the join to occur if either input rel can legally be
+ * joined to anything else using joinclauses. This essentially means that
+ * clauseless bushy joins are put off as long as possible. The reason is
+ * that when there is a join order restriction high up in the join tree
+ * (that is, with many rels inside the LHS or RHS), we would otherwise
+ * expend lots of effort considering very stupid join combinations within
+ * its LHS or RHS.
+ */
+ if (result)
+ {
+ if (has_legal_joinclause(root, rel1) ||
+ has_legal_joinclause(root, rel2))
+ result = false;
+ }
+
+ return result;
+}
+
+
+/*
+ * has_join_restriction
+ * Detect whether the specified relation has join-order restrictions,
+ * due to being inside an outer join or an IN (sub-SELECT),
+ * or participating in any LATERAL references or multi-rel PHVs.
+ *
+ * Essentially, this tests whether have_join_order_restriction() could
+ * succeed with this rel and some other one. It's OK if we sometimes
+ * say "true" incorrectly. (Therefore, we don't bother with the relatively
+ * expensive has_legal_joinclause test.)
+ */
+static bool
+has_join_restriction(PlannerInfo *root, RelOptInfo *rel)
+{
+ ListCell *l;
+
+ if (rel->lateral_relids != NULL || rel->lateral_referencers != NULL)
+ return true;
+
+ foreach(l, root->placeholder_list)
+ {
+ PlaceHolderInfo *phinfo = (PlaceHolderInfo *) lfirst(l);
+
+ if (bms_is_subset(rel->relids, phinfo->ph_eval_at) &&
+ !bms_equal(rel->relids, phinfo->ph_eval_at))
+ return true;
+ }
+
+ foreach(l, root->join_info_list)
+ {
+ SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) lfirst(l);
+
+ /* ignore full joins --- other mechanisms preserve their ordering */
+ if (sjinfo->jointype == JOIN_FULL)
+ continue;
+
+ /* ignore if SJ is already contained in rel */
+ if (bms_is_subset(sjinfo->min_lefthand, rel->relids) &&
+ bms_is_subset(sjinfo->min_righthand, rel->relids))
+ continue;
+
+ /* restricted if it overlaps LHS or RHS, but doesn't contain SJ */
+ if (bms_overlap(sjinfo->min_lefthand, rel->relids) ||
+ bms_overlap(sjinfo->min_righthand, rel->relids))
+ return true;
+ }
+
+ return false;
+}
+
+
+/*
+ * has_legal_joinclause
+ * Detect whether the specified relation can legally be joined
+ * to any other rels using join clauses.
+ *
+ * We consider only joins to single other relations in the current
+ * initial_rels list. This is sufficient to get a "true" result in most real
+ * queries, and an occasional erroneous "false" will only cost a bit more
+ * planning time. The reason for this limitation is that considering joins to
+ * other joins would require proving that the other join rel can legally be
+ * formed, which seems like too much trouble for something that's only a
+ * heuristic to save planning time. (Note: we must look at initial_rels
+ * and not all of the query, since when we are planning a sub-joinlist we
+ * may be forced to make clauseless joins within initial_rels even though
+ * there are join clauses linking to other parts of the query.)
+ */
+static bool
+has_legal_joinclause(PlannerInfo *root, RelOptInfo *rel)
+{
+ ListCell *lc;
+
+ foreach(lc, root->initial_rels)
+ {
+ RelOptInfo *rel2 = (RelOptInfo *) lfirst(lc);
+
+ /* ignore rels that are already in "rel" */
+ if (bms_overlap(rel->relids, rel2->relids))
+ continue;
+
+ if (have_relevant_joinclause(root, rel, rel2))
+ {
+ Relids joinrelids;
+ SpecialJoinInfo *sjinfo;
+ bool reversed;
+
+ /* join_is_legal needs relids of the union */
+ joinrelids = bms_union(rel->relids, rel2->relids);
+
+ if (join_is_legal(root, rel, rel2, joinrelids,
+ &sjinfo, &reversed))
+ {
+ /* Yes, this will work */
+ bms_free(joinrelids);
+ return true;
+ }
+
+ bms_free(joinrelids);
+ }
+ }
+
+ return false;
+}
+
+
+/*
+ * There's a pitfall for creating parameterized nestloops: suppose the inner
+ * rel (call it A) has a parameter that is a PlaceHolderVar, and that PHV's
+ * minimum eval_at set includes the outer rel (B) and some third rel (C).
+ * We might think we could create a B/A nestloop join that's parameterized by
+ * C. But we would end up with a plan in which the PHV's expression has to be
+ * evaluated as a nestloop parameter at the B/A join; and the executor is only
+ * set up to handle simple Vars as NestLoopParams. Rather than add complexity
+ * and overhead to the executor for such corner cases, it seems better to
+ * forbid the join. (Note that we can still make use of A's parameterized
+ * path with pre-joined B+C as the outer rel. have_join_order_restriction()
+ * ensures that we will consider making such a join even if there are not
+ * other reasons to do so.)
+ *
+ * So we check whether any PHVs used in the query could pose such a hazard.
+ * We don't have any simple way of checking whether a risky PHV would actually
+ * be used in the inner plan, and the case is so unusual that it doesn't seem
+ * worth working very hard on it.
+ *
+ * This needs to be checked in two places. If the inner rel's minimum
+ * parameterization would trigger the restriction, then join_is_legal() should
+ * reject the join altogether, because there will be no workable paths for it.
+ * But joinpath.c has to check again for every proposed nestloop path, because
+ * the inner path might have more than the minimum parameterization, causing
+ * some PHV to be dangerous for it that otherwise wouldn't be.
+ */
+bool
+have_dangerous_phv(PlannerInfo *root,
+ Relids outer_relids, Relids inner_params)
+{
+ ListCell *lc;
+
+ foreach(lc, root->placeholder_list)
+ {
+ PlaceHolderInfo *phinfo = (PlaceHolderInfo *) lfirst(lc);
+
+ if (!bms_is_subset(phinfo->ph_eval_at, inner_params))
+ continue; /* ignore, could not be a nestloop param */
+ if (!bms_overlap(phinfo->ph_eval_at, outer_relids))
+ continue; /* ignore, not relevant to this join */
+ if (bms_is_subset(phinfo->ph_eval_at, outer_relids))
+ continue; /* safe, it can be eval'd within outerrel */
+ /* Otherwise, it's potentially unsafe, so reject the join */
+ return true;
+ }
+
+ /* OK to perform the join */
+ return false;
+}
+
+
+/*
+ * is_dummy_rel --- has relation been proven empty?
+ */
+bool
+is_dummy_rel(RelOptInfo *rel)
+{
+ Path *path;
+
+ /*
+ * A rel that is known dummy will have just one path that is a childless
+ * Append. (Even if somehow it has more paths, a childless Append will
+ * have cost zero and hence should be at the front of the pathlist.)
+ */
+ if (rel->pathlist == NIL)
+ return false;
+ path = (Path *) linitial(rel->pathlist);
+
+ /*
+ * Initially, a dummy path will just be a childless Append. But in later
+ * planning stages we might stick a ProjectSetPath and/or ProjectionPath
+ * on top, since Append can't project. Rather than make assumptions about
+ * which combinations can occur, just descend through whatever we find.
+ */
+ for (;;)
+ {
+ if (IsA(path, ProjectionPath))
+ path = ((ProjectionPath *) path)->subpath;
+ else if (IsA(path, ProjectSetPath))
+ path = ((ProjectSetPath *) path)->subpath;
+ else
+ break;
+ }
+ if (IS_DUMMY_APPEND(path))
+ return true;
+ return false;
+}
+
+/*
+ * Mark a relation as proven empty.
+ *
+ * During GEQO planning, this can get invoked more than once on the same
+ * baserel struct, so it's worth checking to see if the rel is already marked
+ * dummy.
+ *
+ * Also, when called during GEQO join planning, we are in a short-lived
+ * memory context. We must make sure that the dummy path attached to a
+ * baserel survives the GEQO cycle, else the baserel is trashed for future
+ * GEQO cycles. On the other hand, when we are marking a joinrel during GEQO,
+ * we don't want the dummy path to clutter the main planning context. Upshot
+ * is that the best solution is to explicitly make the dummy path in the same
+ * context the given RelOptInfo is in.
+ */
+void
+mark_dummy_rel(RelOptInfo *rel)
+{
+ MemoryContext oldcontext;
+
+ /* Already marked? */
+ if (is_dummy_rel(rel))
+ return;
+
+ /* No, so choose correct context to make the dummy path in */
+ oldcontext = MemoryContextSwitchTo(GetMemoryChunkContext(rel));
+
+ /* Set dummy size estimate */
+ rel->rows = 0;
+
+ /* Evict any previously chosen paths */
+ rel->pathlist = NIL;
+ rel->partial_pathlist = NIL;
+
+ /* Set up the dummy path */
+ add_path(rel, (Path *) create_append_path(NULL, rel, NIL, NIL,
+ NIL, rel->lateral_relids,
+ 0, false, -1));
+
+ /* Set or update cheapest_total_path and related fields */
+ set_cheapest(rel);
+
+ MemoryContextSwitchTo(oldcontext);
+}
+
+
+/*
+ * restriction_is_constant_false --- is a restrictlist just FALSE?
+ *
+ * In cases where a qual is provably constant FALSE, eval_const_expressions
+ * will generally have thrown away anything that's ANDed with it. In outer
+ * join situations this will leave us computing cartesian products only to
+ * decide there's no match for an outer row, which is pretty stupid. So,
+ * we need to detect the case.
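+ *
+ * As a rough illustration (a hypothetical query, shown only for
+ * illustration, not taken from anything in this file): given
+ *		SELECT * FROM a LEFT JOIN b ON a.x = b.x AND false;
+ * eval_const_expressions reduces the join qual to plain FALSE, and this
+ * check lets the planner notice that no b row can ever match any a row.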
+ *
+ * If only_pushed_down is true, then consider only quals that are pushed-down
+ * from the point of view of the joinrel.
+ */
+static bool
+restriction_is_constant_false(List *restrictlist,
+ RelOptInfo *joinrel,
+ bool only_pushed_down)
+{
+ ListCell *lc;
+
+ /*
+ * Despite the above comment, the restriction list we see here might
+ * possibly have other members besides the FALSE constant, since other
+ * quals could get "pushed down" to the outer join level. So we check
+ * each member of the list.
+ */
+ foreach(lc, restrictlist)
+ {
+ RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc);
+
+ if (only_pushed_down && !RINFO_IS_PUSHED_DOWN(rinfo, joinrel->relids))
+ continue;
+
+ if (rinfo->clause && IsA(rinfo->clause, Const))
+ {
+ Const *con = (Const *) rinfo->clause;
+
+ /* constant NULL is as good as constant FALSE for our purposes */
+ if (con->constisnull)
+ return true;
+ if (!DatumGetBool(con->constvalue))
+ return true;
+ }
+ }
+ return false;
+}
+
+/*
+ * Assess whether join between given two partitioned relations can be broken
+ * down into joins between matching partitions; a technique called
+ * "partitionwise join"
+ *
+ * Partitionwise join is possible when (a) the joining relations have the
+ * same partitioning scheme, and (b) there exists an equi-join between the
+ * partition keys of the two relations.
+ *
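+ * For example (hypothetical tables, shown only for illustration): if t1 and
+ * t2 are both partitioned by RANGE (id) with identical bounds, then
+ *		SELECT * FROM t1 JOIN t2 USING (id);
+ * can be planned as an Append over the joins of matching partition pairs
+ * (t1_p1 with t2_p1, t1_p2 with t2_p2, ...) instead of a single join of the
+ * whole tables.
+ *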
+ * Partitionwise join is planned as follows (details: optimizer/README).
+ *
+ * 1. Create the RelOptInfos for joins between matching partitions, i.e.,
+ * child-joins, and add paths to them.
+ *
+ * 2. Construct Append or MergeAppend paths across the set of child joins.
+ * This second phase is implemented by generate_partitionwise_join_paths().
+ *
+ * The RelOptInfo, SpecialJoinInfo and restrictlist for each child join are
+ * obtained by translating the respective parent join structures.
+ */
+static void
+try_partitionwise_join(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2,
+ RelOptInfo *joinrel, SpecialJoinInfo *parent_sjinfo,
+ List *parent_restrictlist)
+{
+ bool rel1_is_simple = IS_SIMPLE_REL(rel1);
+ bool rel2_is_simple = IS_SIMPLE_REL(rel2);
+ List *parts1 = NIL;
+ List *parts2 = NIL;
+ ListCell *lcr1 = NULL;
+ ListCell *lcr2 = NULL;
+ int cnt_parts;
+
+ /* Guard against stack overflow due to overly deep partition hierarchy. */
+ check_stack_depth();
+
+ /* Nothing to do if the join relation is not partitioned. */
+ if (joinrel->part_scheme == NULL || joinrel->nparts == 0)
+ return;
+
+ /* The join relation should have consider_partitionwise_join set. */
+ Assert(joinrel->consider_partitionwise_join);
+
+ /*
+ * We cannot perform a partitionwise join if either of the joining
+ * relations is not partitioned.
+ */
+ if (!IS_PARTITIONED_REL(rel1) || !IS_PARTITIONED_REL(rel2))
+ return;
+
+ Assert(REL_HAS_ALL_PART_PROPS(rel1) && REL_HAS_ALL_PART_PROPS(rel2));
+
+ /* The joining relations should have consider_partitionwise_join set. */
+ Assert(rel1->consider_partitionwise_join &&
+ rel2->consider_partitionwise_join);
+
+ /*
+ * The partition scheme of the join relation should match that of the
+ * joining relations.
+ */
+ Assert(joinrel->part_scheme == rel1->part_scheme &&
+ joinrel->part_scheme == rel2->part_scheme);
+
+ Assert(!(joinrel->partbounds_merged && (joinrel->nparts <= 0)));
+
+ compute_partition_bounds(root, rel1, rel2, joinrel, parent_sjinfo,
+ &parts1, &parts2);
+
+ if (joinrel->partbounds_merged)
+ {
+ lcr1 = list_head(parts1);
+ lcr2 = list_head(parts2);
+ }
+
+ /*
+ * Create child-join relations for this partitioned join, if those don't
+ * exist. Add paths to child-joins for a pair of child relations
+ * corresponding to the given pair of parent relations.
+ */
+ for (cnt_parts = 0; cnt_parts < joinrel->nparts; cnt_parts++)
+ {
+ RelOptInfo *child_rel1;
+ RelOptInfo *child_rel2;
+ bool rel1_empty;
+ bool rel2_empty;
+ SpecialJoinInfo *child_sjinfo;
+ List *child_restrictlist;
+ RelOptInfo *child_joinrel;
+ Relids child_joinrelids;
+ AppendRelInfo **appinfos;
+ int nappinfos;
+
+ if (joinrel->partbounds_merged)
+ {
+ child_rel1 = lfirst_node(RelOptInfo, lcr1);
+ child_rel2 = lfirst_node(RelOptInfo, lcr2);
+ lcr1 = lnext(parts1, lcr1);
+ lcr2 = lnext(parts2, lcr2);
+ }
+ else
+ {
+ child_rel1 = rel1->part_rels[cnt_parts];
+ child_rel2 = rel2->part_rels[cnt_parts];
+ }
+
+ rel1_empty = (child_rel1 == NULL || IS_DUMMY_REL(child_rel1));
+ rel2_empty = (child_rel2 == NULL || IS_DUMMY_REL(child_rel2));
+
+ /*
+ * Check for cases where we can prove that this segment of the join
+ * returns no rows, due to one or both inputs being empty (including
+ * inputs that have been pruned away entirely). If so just ignore it.
+ * These rules are equivalent to populate_joinrel_with_paths's rules
+ * for dummy input relations.
+ */
+ switch (parent_sjinfo->jointype)
+ {
+ case JOIN_INNER:
+ case JOIN_SEMI:
+ if (rel1_empty || rel2_empty)
+ continue; /* ignore this join segment */
+ break;
+ case JOIN_LEFT:
+ case JOIN_ANTI:
+ if (rel1_empty)
+ continue; /* ignore this join segment */
+ break;
+ case JOIN_FULL:
+ if (rel1_empty && rel2_empty)
+ continue; /* ignore this join segment */
+ break;
+ default:
+ /* other values not expected here */
+ elog(ERROR, "unrecognized join type: %d",
+ (int) parent_sjinfo->jointype);
+ break;
+ }
+
+ /*
+ * If a child has been pruned entirely then we can't generate paths
+ * for it, so we have to reject partitionwise joining unless we were
+ * able to eliminate this partition above.
+ */
+ if (child_rel1 == NULL || child_rel2 == NULL)
+ {
+ /*
+ * Mark the joinrel as unpartitioned so that later functions treat
+ * it correctly.
+ */
+ joinrel->nparts = 0;
+ return;
+ }
+
+ /*
+ * If a leaf relation has consider_partitionwise_join=false, it means
+ * that it's a dummy relation for which we skipped setting up tlist
+ * expressions and adding EC members in set_append_rel_size(), so
+ * again we have to fail here.
+ */
+ if (rel1_is_simple && !child_rel1->consider_partitionwise_join)
+ {
+ Assert(child_rel1->reloptkind == RELOPT_OTHER_MEMBER_REL);
+ Assert(IS_DUMMY_REL(child_rel1));
+ joinrel->nparts = 0;
+ return;
+ }
+ if (rel2_is_simple && !child_rel2->consider_partitionwise_join)
+ {
+ Assert(child_rel2->reloptkind == RELOPT_OTHER_MEMBER_REL);
+ Assert(IS_DUMMY_REL(child_rel2));
+ joinrel->nparts = 0;
+ return;
+ }
+
+ /* We should never try to join two overlapping sets of rels. */
+ Assert(!bms_overlap(child_rel1->relids, child_rel2->relids));
+ child_joinrelids = bms_union(child_rel1->relids, child_rel2->relids);
+ appinfos = find_appinfos_by_relids(root, child_joinrelids, &nappinfos);
+
+ /*
+ * Construct SpecialJoinInfo from the parent join relation's
+ * SpecialJoinInfo.
+ */
+ child_sjinfo = build_child_join_sjinfo(root, parent_sjinfo,
+ child_rel1->relids,
+ child_rel2->relids);
+
+ /*
+ * Construct restrictions applicable to the child join from those
+ * applicable to the parent join.
+ */
+ child_restrictlist =
+ (List *) adjust_appendrel_attrs(root,
+ (Node *) parent_restrictlist,
+ nappinfos, appinfos);
+ pfree(appinfos);
+
+ child_joinrel = joinrel->part_rels[cnt_parts];
+ if (!child_joinrel)
+ {
+ child_joinrel = build_child_join_rel(root, child_rel1, child_rel2,
+ joinrel, child_restrictlist,
+ child_sjinfo,
+ child_sjinfo->jointype);
+ joinrel->part_rels[cnt_parts] = child_joinrel;
+ joinrel->live_parts = bms_add_member(joinrel->live_parts, cnt_parts);
+ joinrel->all_partrels = bms_add_members(joinrel->all_partrels,
+ child_joinrel->relids);
+ }
+
+ Assert(bms_equal(child_joinrel->relids, child_joinrelids));
+
+ populate_joinrel_with_paths(root, child_rel1, child_rel2,
+ child_joinrel, child_sjinfo,
+ child_restrictlist);
+ }
+}
+
+/*
+ * Construct the SpecialJoinInfo for a child-join by translating
+ * the SpecialJoinInfo for the join between the parents. left_relids and
+ * right_relids are the relids of the left and right sides of the join,
+ * respectively.
+ */
+static SpecialJoinInfo *
+build_child_join_sjinfo(PlannerInfo *root, SpecialJoinInfo *parent_sjinfo,
+ Relids left_relids, Relids right_relids)
+{
+ SpecialJoinInfo *sjinfo = makeNode(SpecialJoinInfo);
+ AppendRelInfo **left_appinfos;
+ int left_nappinfos;
+ AppendRelInfo **right_appinfos;
+ int right_nappinfos;
+
+ memcpy(sjinfo, parent_sjinfo, sizeof(SpecialJoinInfo));
+ left_appinfos = find_appinfos_by_relids(root, left_relids,
+ &left_nappinfos);
+ right_appinfos = find_appinfos_by_relids(root, right_relids,
+ &right_nappinfos);
+
+ sjinfo->min_lefthand = adjust_child_relids(sjinfo->min_lefthand,
+ left_nappinfos, left_appinfos);
+ sjinfo->min_righthand = adjust_child_relids(sjinfo->min_righthand,
+ right_nappinfos,
+ right_appinfos);
+ sjinfo->syn_lefthand = adjust_child_relids(sjinfo->syn_lefthand,
+ left_nappinfos, left_appinfos);
+ sjinfo->syn_righthand = adjust_child_relids(sjinfo->syn_righthand,
+ right_nappinfos,
+ right_appinfos);
+ sjinfo->semi_rhs_exprs = (List *) adjust_appendrel_attrs(root,
+ (Node *) sjinfo->semi_rhs_exprs,
+ right_nappinfos,
+ right_appinfos);
+
+ pfree(left_appinfos);
+ pfree(right_appinfos);
+
+ return sjinfo;
+}
+
+/*
+ * compute_partition_bounds
+ * Compute the partition bounds for a join rel from those for inputs
+ */
+static void
+compute_partition_bounds(PlannerInfo *root, RelOptInfo *rel1,
+ RelOptInfo *rel2, RelOptInfo *joinrel,
+ SpecialJoinInfo *parent_sjinfo,
+ List **parts1, List **parts2)
+{
+ /*
+ * If we don't have the partition bounds for the join rel yet, try to
+ * compute those along with pairs of partitions to be joined.
+ */
+ if (joinrel->nparts == -1)
+ {
+ PartitionScheme part_scheme = joinrel->part_scheme;
+ PartitionBoundInfo boundinfo = NULL;
+ int nparts = 0;
+
+ Assert(joinrel->boundinfo == NULL);
+ Assert(joinrel->part_rels == NULL);
+
+ /*
+ * See if the partition bounds for inputs are exactly the same, in
+ * which case we don't need to work hard: the join rel will have the
+ * same partition bounds as inputs, and the partitions with the same
+ * cardinal positions will form the pairs.
+ *
+ * Note: even in cases where one or both inputs have merged bounds, it
+ * would be possible for both the bounds to be exactly the same, but
+ * it seems unlikely to be worth the cycles to check.
+ */
+ if (!rel1->partbounds_merged &&
+ !rel2->partbounds_merged &&
+ rel1->nparts == rel2->nparts &&
+ partition_bounds_equal(part_scheme->partnatts,
+ part_scheme->parttyplen,
+ part_scheme->parttypbyval,
+ rel1->boundinfo, rel2->boundinfo))
+ {
+ boundinfo = rel1->boundinfo;
+ nparts = rel1->nparts;
+ }
+ else
+ {
+ /* Try merging the partition bounds for inputs. */
+ boundinfo = partition_bounds_merge(part_scheme->partnatts,
+ part_scheme->partsupfunc,
+ part_scheme->partcollation,
+ rel1, rel2,
+ parent_sjinfo->jointype,
+ parts1, parts2);
+ if (boundinfo == NULL)
+ {
+ joinrel->nparts = 0;
+ return;
+ }
+ nparts = list_length(*parts1);
+ joinrel->partbounds_merged = true;
+ }
+
+ Assert(nparts > 0);
+ joinrel->boundinfo = boundinfo;
+ joinrel->nparts = nparts;
+ joinrel->part_rels =
+ (RelOptInfo **) palloc0(sizeof(RelOptInfo *) * nparts);
+ }
+ else
+ {
+ Assert(joinrel->nparts > 0);
+ Assert(joinrel->boundinfo);
+ Assert(joinrel->part_rels);
+
+ /*
+ * If the join rel's partbounds_merged flag is true, it means inputs
+ * are not guaranteed to have the same partition bounds, therefore we
+ * can't assume that the partitions at the same cardinal positions
+ * form the pairs; let get_matching_part_pairs() generate the pairs.
+ * Otherwise, nothing to do since we can assume that.
+ */
+ if (joinrel->partbounds_merged)
+ {
+ get_matching_part_pairs(root, joinrel, rel1, rel2,
+ parts1, parts2);
+ Assert(list_length(*parts1) == joinrel->nparts);
+ Assert(list_length(*parts2) == joinrel->nparts);
+ }
+ }
+}
+
+/*
+ * get_matching_part_pairs
+ * Generate pairs of partitions to be joined from inputs
+ */
+static void
+get_matching_part_pairs(PlannerInfo *root, RelOptInfo *joinrel,
+ RelOptInfo *rel1, RelOptInfo *rel2,
+ List **parts1, List **parts2)
+{
+ bool rel1_is_simple = IS_SIMPLE_REL(rel1);
+ bool rel2_is_simple = IS_SIMPLE_REL(rel2);
+ int cnt_parts;
+
+ *parts1 = NIL;
+ *parts2 = NIL;
+
+ for (cnt_parts = 0; cnt_parts < joinrel->nparts; cnt_parts++)
+ {
+ RelOptInfo *child_joinrel = joinrel->part_rels[cnt_parts];
+ RelOptInfo *child_rel1;
+ RelOptInfo *child_rel2;
+ Relids child_relids1;
+ Relids child_relids2;
+
+ /*
+ * If this segment of the join is empty, it means that this segment
+ * was ignored when previously creating child-join paths for it in
+ * try_partitionwise_join() as it would not contribute to the join
+ * result, due to one or both inputs being empty; add NULL to each of
+ * the given lists so that this segment will be ignored again in that
+ * function.
+ */
+ if (!child_joinrel)
+ {
+ *parts1 = lappend(*parts1, NULL);
+ *parts2 = lappend(*parts2, NULL);
+ continue;
+ }
+
+ /*
+ * Get a relids set of partition(s) involved in this join segment that
+ * are from the rel1 side.
+ */
+ child_relids1 = bms_intersect(child_joinrel->relids,
+ rel1->all_partrels);
+ Assert(bms_num_members(child_relids1) == bms_num_members(rel1->relids));
+
+ /*
+ * Get a child rel for rel1 with the relids. Note that we should have
+ * the child rel even if rel1 is a join rel, because in that case the
+ * partitions specified in the relids would have matching/overlapping
+ * boundaries, so the specified partitions should be considered as
+ * ones to be joined when planning partitionwise joins of rel1,
+ * meaning that the child rel would have been built by the time we get
+ * here.
+ */
+ if (rel1_is_simple)
+ {
+ int varno = bms_singleton_member(child_relids1);
+
+ child_rel1 = find_base_rel(root, varno);
+ }
+ else
+ child_rel1 = find_join_rel(root, child_relids1);
+ Assert(child_rel1);
+
+ /*
+ * Get a relids set of partition(s) involved in this join segment that
+ * are from the rel2 side.
+ */
+ child_relids2 = bms_intersect(child_joinrel->relids,
+ rel2->all_partrels);
+ Assert(bms_num_members(child_relids2) == bms_num_members(rel2->relids));
+
+ /*
+ * Get a child rel for rel2 with the relids. See above comments.
+ */
+ if (rel2_is_simple)
+ {
+ int varno = bms_singleton_member(child_relids2);
+
+ child_rel2 = find_base_rel(root, varno);
+ }
+ else
+ child_rel2 = find_join_rel(root, child_relids2);
+ Assert(child_rel2);
+
+ /*
+ * The join of rel1 and rel2 is legal, so is the join of the child
+ * rels obtained above; add them to the given lists as a join pair
+ * producing this join segment.
+ */
+ *parts1 = lappend(*parts1, child_rel1);
+ *parts2 = lappend(*parts2, child_rel2);
+ }
+}
diff --git a/src/backend/optimizer/path/pathkeys.c b/src/backend/optimizer/path/pathkeys.c
new file mode 100644
index 0000000..86a35cd
--- /dev/null
+++ b/src/backend/optimizer/path/pathkeys.c
@@ -0,0 +1,1917 @@
+/*-------------------------------------------------------------------------
+ *
+ * pathkeys.c
+ * Utilities for matching and building path keys
+ *
+ * See src/backend/optimizer/README for a great deal of information about
+ * the nature and use of path keys.
+ *
+ *
+ * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * IDENTIFICATION
+ * src/backend/optimizer/path/pathkeys.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "access/stratnum.h"
+#include "catalog/pg_opfamily.h"
+#include "nodes/makefuncs.h"
+#include "nodes/nodeFuncs.h"
+#include "nodes/plannodes.h"
+#include "optimizer/optimizer.h"
+#include "optimizer/pathnode.h"
+#include "optimizer/paths.h"
+#include "partitioning/partbounds.h"
+#include "utils/lsyscache.h"
+
+
+static bool pathkey_is_redundant(PathKey *new_pathkey, List *pathkeys);
+static bool matches_boolean_partition_clause(RestrictInfo *rinfo,
+ RelOptInfo *partrel,
+ int partkeycol);
+static Var *find_var_for_subquery_tle(RelOptInfo *rel, TargetEntry *tle);
+static bool right_merge_direction(PlannerInfo *root, PathKey *pathkey);
+
+
+/****************************************************************************
+ * PATHKEY CONSTRUCTION AND REDUNDANCY TESTING
+ ****************************************************************************/
+
+/*
+ * make_canonical_pathkey
+ * Given the parameters for a PathKey, find any pre-existing matching
+ * pathkey in the query's list of "canonical" pathkeys. Make a new
+ * entry if there's not one already.
+ *
+ * Note that this function must not be used until after we have completed
+ * merging EquivalenceClasses.
+ */
+PathKey *
+make_canonical_pathkey(PlannerInfo *root,
+ EquivalenceClass *eclass, Oid opfamily,
+ int strategy, bool nulls_first)
+{
+ PathKey *pk;
+ ListCell *lc;
+ MemoryContext oldcontext;
+
+ /* Can't make canonical pathkeys if the set of ECs might still change */
+ if (!root->ec_merging_done)
+ elog(ERROR, "too soon to build canonical pathkeys");
+
+ /* The passed eclass might be non-canonical, so chase up to the top */
+ while (eclass->ec_merged)
+ eclass = eclass->ec_merged;
+
+ foreach(lc, root->canon_pathkeys)
+ {
+ pk = (PathKey *) lfirst(lc);
+ if (eclass == pk->pk_eclass &&
+ opfamily == pk->pk_opfamily &&
+ strategy == pk->pk_strategy &&
+ nulls_first == pk->pk_nulls_first)
+ return pk;
+ }
+
+ /*
+ * Be sure canonical pathkeys are allocated in the main planning context.
+ * Not an issue in normal planning, but it is for GEQO.
+ */
+ oldcontext = MemoryContextSwitchTo(root->planner_cxt);
+
+ pk = makeNode(PathKey);
+ pk->pk_eclass = eclass;
+ pk->pk_opfamily = opfamily;
+ pk->pk_strategy = strategy;
+ pk->pk_nulls_first = nulls_first;
+
+ root->canon_pathkeys = lappend(root->canon_pathkeys, pk);
+
+ MemoryContextSwitchTo(oldcontext);
+
+ return pk;
+}
+
+/*
+ * pathkey_is_redundant
+ * Is a pathkey redundant with one already in the given list?
+ *
+ * We detect two cases:
+ *
+ * 1. If the new pathkey's equivalence class contains a constant, and isn't
+ * below an outer join, then we can disregard it as a sort key. An example:
+ * SELECT ... WHERE x = 42 ORDER BY x, y;
+ * We may as well just sort by y. Note that because of opfamily matching,
+ * this is semantically correct: we know that the equality constraint is one
+ * that actually binds the variable to a single value in the terms of any
+ * ordering operator that might go with the eclass. This rule not only lets
+ * us simplify (or even skip) explicit sorts, but also allows matching index
+ * sort orders to a query when there are don't-care index columns.
+ *
+ * 2. If the new pathkey's equivalence class is the same as that of any
+ * existing member of the pathkey list, then it is redundant. Some examples:
+ * SELECT ... ORDER BY x, x;
+ * SELECT ... ORDER BY x, x DESC;
+ * SELECT ... WHERE x = y ORDER BY x, y;
+ * In all these cases the second sort key cannot distinguish values that are
+ * considered equal by the first, and so there's no point in using it.
+ * Note in particular that we need not compare opfamily (all the opfamilies
+ * of the EC have the same notion of equality) nor sort direction.
+ *
+ * Both the given pathkey and the list members must be canonical for this
+ * to work properly, but that's okay since we no longer ever construct any
+ * non-canonical pathkeys. (Note: the notion of a pathkey *list* being
+ * canonical includes the additional requirement of no redundant entries,
+ * which is exactly what we are checking for here.)
+ *
+ * Because the equivclass.c machinery forms only one copy of any EC per query,
+ * pointer comparison is enough to decide whether canonical ECs are the same.
+ */
+static bool
+pathkey_is_redundant(PathKey *new_pathkey, List *pathkeys)
+{
+ EquivalenceClass *new_ec = new_pathkey->pk_eclass;
+ ListCell *lc;
+
+ /* Check for EC containing a constant --- unconditionally redundant */
+ if (EC_MUST_BE_REDUNDANT(new_ec))
+ return true;
+
+ /* If same EC already used in list, then redundant */
+ foreach(lc, pathkeys)
+ {
+ PathKey *old_pathkey = (PathKey *) lfirst(lc);
+
+ if (new_ec == old_pathkey->pk_eclass)
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * make_pathkey_from_sortinfo
+ * Given an expression and sort-order information, create a PathKey.
+ * The result is always a "canonical" PathKey, but it might be redundant.
+ *
+ * expr is the expression, and nullable_relids is the set of base relids
+ * that are potentially nullable below it.
+ *
+ * If the PathKey is being generated from a SortGroupClause, sortref should be
+ * the SortGroupClause's SortGroupRef; otherwise zero.
+ *
+ * If rel is not NULL, it identifies a specific relation we're considering
+ * a path for, and indicates that child EC members for that relation can be
+ * considered. Otherwise child members are ignored. (See the comments for
+ * get_eclass_for_sort_expr.)
+ *
+ * create_it is true if we should create any missing EquivalenceClass
+ * needed to represent the sort key. If it's false, we return NULL if the
+ * sort key isn't already present in any EquivalenceClass.
+ */
+static PathKey *
+make_pathkey_from_sortinfo(PlannerInfo *root,
+ Expr *expr,
+ Relids nullable_relids,
+ Oid opfamily,
+ Oid opcintype,
+ Oid collation,
+ bool reverse_sort,
+ bool nulls_first,
+ Index sortref,
+ Relids rel,
+ bool create_it)
+{
+ int16 strategy;
+ Oid equality_op;
+ List *opfamilies;
+ EquivalenceClass *eclass;
+
+ strategy = reverse_sort ? BTGreaterStrategyNumber : BTLessStrategyNumber;
+
+ /*
+ * EquivalenceClasses need to contain opfamily lists based on the family
+ * membership of mergejoinable equality operators, which could belong to
+ * more than one opfamily. So we have to look up the opfamily's equality
+ * operator and get its membership.
+ */
+ equality_op = get_opfamily_member(opfamily,
+ opcintype,
+ opcintype,
+ BTEqualStrategyNumber);
+ if (!OidIsValid(equality_op)) /* shouldn't happen */
+ elog(ERROR, "missing operator %d(%u,%u) in opfamily %u",
+ BTEqualStrategyNumber, opcintype, opcintype, opfamily);
+ opfamilies = get_mergejoin_opfamilies(equality_op);
+ if (!opfamilies) /* certainly should find some */
+ elog(ERROR, "could not find opfamilies for equality operator %u",
+ equality_op);
+
+ /* Now find or (optionally) create a matching EquivalenceClass */
+ eclass = get_eclass_for_sort_expr(root, expr, nullable_relids,
+ opfamilies, opcintype, collation,
+ sortref, rel, create_it);
+
+ /* Fail if no EC and !create_it */
+ if (!eclass)
+ return NULL;
+
+ /* And finally we can find or create a PathKey node */
+ return make_canonical_pathkey(root, eclass, opfamily,
+ strategy, nulls_first);
+}
+
+/*
+ * make_pathkey_from_sortop
+ * Like make_pathkey_from_sortinfo, but work from a sort operator.
+ *
+ * This should eventually go away, but we need to restructure SortGroupClause
+ * first.
+ */
+static PathKey *
+make_pathkey_from_sortop(PlannerInfo *root,
+ Expr *expr,
+ Relids nullable_relids,
+ Oid ordering_op,
+ bool nulls_first,
+ Index sortref,
+ bool create_it)
+{
+ Oid opfamily,
+ opcintype,
+ collation;
+ int16 strategy;
+
+ /* Find the operator in pg_amop --- failure shouldn't happen */
+ if (!get_ordering_op_properties(ordering_op,
+ &opfamily, &opcintype, &strategy))
+ elog(ERROR, "operator %u is not a valid ordering operator",
+ ordering_op);
+
+ /* Because SortGroupClause doesn't carry collation, consult the expr */
+ collation = exprCollation((Node *) expr);
+
+ return make_pathkey_from_sortinfo(root,
+ expr,
+ nullable_relids,
+ opfamily,
+ opcintype,
+ collation,
+ (strategy == BTGreaterStrategyNumber),
+ nulls_first,
+ sortref,
+ NULL,
+ create_it);
+}
+
+
+/****************************************************************************
+ * PATHKEY COMPARISONS
+ ****************************************************************************/
+
+/*
+ * compare_pathkeys
+ * Compare two pathkeys to see if they are equivalent, and if not whether
+ * one is "better" than the other.
+ *
+ * We assume the pathkeys are canonical, and so they can be checked for
+ * equality by simple pointer comparison.
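+ *
+ * For example (with hypothetical canonical pathkeys a and b): comparing
+ * (a, b) against (a) yields PATHKEYS_BETTER1, since keys1 describes a
+ * strictly stronger ordering; (a) against (a, b) yields PATHKEYS_BETTER2;
+ * and (a) against (b) yields PATHKEYS_DIFFERENT.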
+ */
+PathKeysComparison
+compare_pathkeys(List *keys1, List *keys2)
+{
+ ListCell *key1,
+ *key2;
+
+ /*
+ * Fall out quickly if we are passed two identical lists. This mostly
+ * catches the case where both are NIL, but that's common enough to
+ * warrant the test.
+ */
+ if (keys1 == keys2)
+ return PATHKEYS_EQUAL;
+
+ forboth(key1, keys1, key2, keys2)
+ {
+ PathKey *pathkey1 = (PathKey *) lfirst(key1);
+ PathKey *pathkey2 = (PathKey *) lfirst(key2);
+
+ if (pathkey1 != pathkey2)
+ return PATHKEYS_DIFFERENT; /* no need to keep looking */
+ }
+
+ /*
+ * If we reached the end of only one list, the other is longer and
+ * therefore not a subset.
+ */
+ if (key1 != NULL)
+ return PATHKEYS_BETTER1; /* key1 is longer */
+ if (key2 != NULL)
+ return PATHKEYS_BETTER2; /* key2 is longer */
+ return PATHKEYS_EQUAL;
+}
+
+/*
+ * pathkeys_contained_in
+ * Common special case of compare_pathkeys: we just want to know
+ * if keys2 are at least as well sorted as keys1.
+ */
+bool
+pathkeys_contained_in(List *keys1, List *keys2)
+{
+ switch (compare_pathkeys(keys1, keys2))
+ {
+ case PATHKEYS_EQUAL:
+ case PATHKEYS_BETTER2:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+/*
+ * pathkeys_count_contained_in
+ * Same as pathkeys_contained_in, but also sets length of longest
+ * common prefix of keys1 and keys2.
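+ *
+ * For example (hypothetical canonical pathkeys a, b, c, d): with
+ * keys1 = (a, b, c) and keys2 = (a, b, d) this sets *n_common = 2 and
+ * returns false, since keys1 is not fully contained in keys2.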
+ */
+bool
+pathkeys_count_contained_in(List *keys1, List *keys2, int *n_common)
+{
+ int n = 0;
+ ListCell *key1,
+ *key2;
+
+ /*
+ * See if we can avoid looping through both lists. This optimization
+ * gains us several percent in planning time in a worst-case test.
+ */
+ if (keys1 == keys2)
+ {
+ *n_common = list_length(keys1);
+ return true;
+ }
+ else if (keys1 == NIL)
+ {
+ *n_common = 0;
+ return true;
+ }
+ else if (keys2 == NIL)
+ {
+ *n_common = 0;
+ return false;
+ }
+
+ /*
+ * If both lists are non-empty, iterate through both to find out how many
+ * items are shared.
+ */
+ forboth(key1, keys1, key2, keys2)
+ {
+ PathKey *pathkey1 = (PathKey *) lfirst(key1);
+ PathKey *pathkey2 = (PathKey *) lfirst(key2);
+
+ if (pathkey1 != pathkey2)
+ {
+ *n_common = n;
+ return false;
+ }
+ n++;
+ }
+
+ /* If we ended with a null value, then we've processed the whole list. */
+ *n_common = n;
+ return (key1 == NULL);
+}
+
+/*
+ * get_cheapest_path_for_pathkeys
+ * Find the cheapest path (according to the specified criterion) that
+ * satisfies the given pathkeys and parameterization.
+ * Return NULL if no such path.
+ *
+ * 'paths' is a list of possible paths that all generate the same relation
+ * 'pathkeys' represents a required ordering (in canonical form!)
+ * 'required_outer' denotes allowable outer relations for parameterized paths
+ * 'cost_criterion' is STARTUP_COST or TOTAL_COST
+ * 'require_parallel_safe' causes us to consider only parallel-safe paths
+ */
+Path *
+get_cheapest_path_for_pathkeys(List *paths, List *pathkeys,
+ Relids required_outer,
+ CostSelector cost_criterion,
+ bool require_parallel_safe)
+{
+ Path *matched_path = NULL;
+ ListCell *l;
+
+ foreach(l, paths)
+ {
+ Path *path = (Path *) lfirst(l);
+
+ /*
+ * Since cost comparison is a lot cheaper than pathkey comparison, do
+ * that first. (XXX is that still true?)
+ */
+ if (matched_path != NULL &&
+ compare_path_costs(matched_path, path, cost_criterion) <= 0)
+ continue;
+
+ if (require_parallel_safe && !path->parallel_safe)
+ continue;
+
+ if (pathkeys_contained_in(pathkeys, path->pathkeys) &&
+ bms_is_subset(PATH_REQ_OUTER(path), required_outer))
+ matched_path = path;
+ }
+ return matched_path;
+}
+
+/*
+ * get_cheapest_fractional_path_for_pathkeys
+ * Find the cheapest path (for retrieving a specified fraction of all
+ * the tuples) that satisfies the given pathkeys and parameterization.
+ * Return NULL if no such path.
+ *
+ * See compare_fractional_path_costs() for the interpretation of the fraction
+ * parameter.
+ *
+ * 'paths' is a list of possible paths that all generate the same relation
+ * 'pathkeys' represents a required ordering (in canonical form!)
+ * 'required_outer' denotes allowable outer relations for parameterized paths
+ * 'fraction' is the fraction of the total tuples expected to be retrieved
+ */
+Path *
+get_cheapest_fractional_path_for_pathkeys(List *paths,
+ List *pathkeys,
+ Relids required_outer,
+ double fraction)
+{
+ Path *matched_path = NULL;
+ ListCell *l;
+
+ foreach(l, paths)
+ {
+ Path *path = (Path *) lfirst(l);
+
+ /*
+ * Since cost comparison is a lot cheaper than pathkey comparison, do
+ * that first. (XXX is that still true?)
+ */
+ if (matched_path != NULL &&
+ compare_fractional_path_costs(matched_path, path, fraction) <= 0)
+ continue;
+
+ if (pathkeys_contained_in(pathkeys, path->pathkeys) &&
+ bms_is_subset(PATH_REQ_OUTER(path), required_outer))
+ matched_path = path;
+ }
+ return matched_path;
+}
+
+
+/*
+ * get_cheapest_parallel_safe_total_inner
+ * Find the unparameterized parallel-safe path with the least total cost.
+ */
+Path *
+get_cheapest_parallel_safe_total_inner(List *paths)
+{
+ ListCell *l;
+
+ foreach(l, paths)
+ {
+ Path *innerpath = (Path *) lfirst(l);
+
+ if (innerpath->parallel_safe &&
+ bms_is_empty(PATH_REQ_OUTER(innerpath)))
+ return innerpath;
+ }
+
+ return NULL;
+}
+
+/****************************************************************************
+ * NEW PATHKEY FORMATION
+ ****************************************************************************/
+
+/*
+ * build_index_pathkeys
+ * Build a pathkeys list that describes the ordering induced by an index
+ * scan using the given index. (Note that an unordered index doesn't
+ * induce any ordering, so we return NIL.)
+ *
+ * If 'scandir' is BackwardScanDirection, build pathkeys representing a
+ * backwards scan of the index.
+ *
+ * We iterate only key columns of covering indexes, since non-key columns
+ * don't influence index ordering. The result is canonical, meaning that
+ * redundant pathkeys are removed; it may therefore have fewer entries than
+ * there are key columns in the index.
+ *
+ * Another reason for stopping early is that we may be able to tell that
+ * an index column's sort order is uninteresting for this query. However,
+ * that test is just based on the existence of an EquivalenceClass and not
+ * on position in pathkey lists, so it's not complete. Caller should call
+ * truncate_useless_pathkeys() to possibly remove more pathkeys.
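+ *
+ * As a rough example (hypothetical index, shown only for illustration): an
+ * index on (x, y) with the default ASC NULLS LAST ordering yields pathkeys
+ * equivalent to ORDER BY x, y under a forward scan, and ORDER BY
+ * x DESC NULLS FIRST, y DESC NULLS FIRST under a backward scan.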
+ */
+List *
+build_index_pathkeys(PlannerInfo *root,
+ IndexOptInfo *index,
+ ScanDirection scandir)
+{
+ List *retval = NIL;
+ ListCell *lc;
+ int i;
+
+ if (index->sortopfamily == NULL)
+ return NIL; /* non-orderable index */
+
+ i = 0;
+ foreach(lc, index->indextlist)
+ {
+ TargetEntry *indextle = (TargetEntry *) lfirst(lc);
+ Expr *indexkey;
+ bool reverse_sort;
+ bool nulls_first;
+ PathKey *cpathkey;
+
+ /*
+ * INCLUDE columns are stored in the index unordered, so they don't
+ * support ordered index scans.
+ */
+ if (i >= index->nkeycolumns)
+ break;
+
+ /* We assume we don't need to make a copy of the tlist item */
+ indexkey = indextle->expr;
+
+ if (ScanDirectionIsBackward(scandir))
+ {
+ reverse_sort = !index->reverse_sort[i];
+ nulls_first = !index->nulls_first[i];
+ }
+ else
+ {
+ reverse_sort = index->reverse_sort[i];
+ nulls_first = index->nulls_first[i];
+ }
+
+ /*
+ * OK, try to make a canonical pathkey for this sort key. Note we're
+ * underneath any outer joins, so nullable_relids should be NULL.
+ */
+ cpathkey = make_pathkey_from_sortinfo(root,
+ indexkey,
+ NULL,
+ index->sortopfamily[i],
+ index->opcintype[i],
+ index->indexcollations[i],
+ reverse_sort,
+ nulls_first,
+ 0,
+ index->rel->relids,
+ false);
+
+ if (cpathkey)
+ {
+ /*
+ * We found the sort key in an EquivalenceClass, so it's relevant
+ * for this query. Add it to list, unless it's redundant.
+ */
+ if (!pathkey_is_redundant(cpathkey, retval))
+ retval = lappend(retval, cpathkey);
+ }
+ else
+ {
+ /*
+ * Boolean index keys might be redundant even if they do not
+ * appear in an EquivalenceClass, because of our special treatment
+ * of boolean equality conditions --- see the comment for
+ * indexcol_is_bool_constant_for_query(). If that applies, we can
+ * continue to examine lower-order index columns. Otherwise, the
+ * sort key is not an interesting sort order for this query, so we
+ * should stop considering index columns; any lower-order sort
+ * keys won't be useful either.
+ */
+ if (!indexcol_is_bool_constant_for_query(root, index, i))
+ break;
+ }
+
+ i++;
+ }
+
+ return retval;
+}
+
+/*
+ * partkey_is_bool_constant_for_query
+ *
+ * If a partition key column is constrained to have a constant value by the
+ * query's WHERE conditions, then it's irrelevant for sort-order
+ * considerations. Usually that means we have a restriction clause
+ * WHERE partkeycol = constant, which gets turned into an EquivalenceClass
+ * containing a constant, which is recognized as redundant by
+ * build_partition_pathkeys(). But if the partition key column is a
+ * boolean variable (or expression), then we are not going to see such a
+ * WHERE clause, because expression preprocessing will have simplified it
+ * to "WHERE partkeycol" or "WHERE NOT partkeycol". So we are not going
+ * to have a matching EquivalenceClass (unless the query also contains
+ * "ORDER BY partkeycol"). To allow such cases to work the same as they would
+ * for non-boolean values, this function is provided to detect whether the
+ * specified partition key column matches a boolean restriction clause.
+ */
+static bool
+partkey_is_bool_constant_for_query(RelOptInfo *partrel, int partkeycol)
+{
+ PartitionScheme partscheme = partrel->part_scheme;
+ ListCell *lc;
+
+ /* If the partkey isn't boolean, we can't possibly get a match */
+ if (!IsBooleanOpfamily(partscheme->partopfamily[partkeycol]))
+ return false;
+
+ /* Check each restriction clause for the partitioned rel */
+ foreach(lc, partrel->baserestrictinfo)
+ {
+ RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
+
+ /* Ignore pseudoconstant quals, they won't match */
+ if (rinfo->pseudoconstant)
+ continue;
+
+ /* See if we can match the clause's expression to the partkey column */
+ if (matches_boolean_partition_clause(rinfo, partrel, partkeycol))
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * matches_boolean_partition_clause
+ * Determine if the boolean clause described by rinfo matches
+ * partrel's partkeycol-th partition key column.
+ *
+ * "Matches" can be either an exact match (equivalent to partkey = true),
+ * or a NOT above an exact match (equivalent to partkey = false).
+ */
+static bool
+matches_boolean_partition_clause(RestrictInfo *rinfo,
+ RelOptInfo *partrel, int partkeycol)
+{
+ Node *clause = (Node *) rinfo->clause;
+ Node *partexpr = (Node *) linitial(partrel->partexprs[partkeycol]);
+
+ /* Direct match? */
+ if (equal(partexpr, clause))
+ return true;
+ /* NOT clause? */
+ else if (is_notclause(clause))
+ {
+ Node *arg = (Node *) get_notclausearg((Expr *) clause);
+
+ if (equal(partexpr, arg))
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * build_partition_pathkeys
+ * Build a pathkeys list that describes the ordering induced by the
+ * partitions of partrel, under either forward or backward scan
+ * as per scandir.
+ *
+ * Caller must have checked that the partitions are properly ordered,
+ * as detected by partitions_are_ordered().
+ *
+ * Sets *partialkeys to true if pathkeys were only built for a prefix of the
+ * partition key, or false if the pathkeys include all columns of the
+ * partition key.
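+ *
+ * As a rough example (hypothetical table, shown only for illustration): for
+ * a table partitioned by RANGE (x) whose live partitions cover ordered,
+ * non-overlapping ranges, scanning the partitions in bound order produces
+ * output compatible with ORDER BY x, so we can report a pathkey on x.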
+ */
+List *
+build_partition_pathkeys(PlannerInfo *root, RelOptInfo *partrel,
+ ScanDirection scandir, bool *partialkeys)
+{
+ List *retval = NIL;
+ PartitionScheme partscheme = partrel->part_scheme;
+ int i;
+
+ Assert(partscheme != NULL);
+ Assert(partitions_are_ordered(partrel->boundinfo, partrel->live_parts));
+ /* For now, we can only cope with baserels */
+ Assert(IS_SIMPLE_REL(partrel));
+
+ for (i = 0; i < partscheme->partnatts; i++)
+ {
+ PathKey *cpathkey;
+ Expr *keyCol = (Expr *) linitial(partrel->partexprs[i]);
+
+ /*
+ * Try to make a canonical pathkey for this partkey.
+ *
+ * We're considering a baserel scan, so nullable_relids should be
+ * NULL. Also, we assume the PartitionDesc lists any NULL partition
+ * last, so we treat the scan like a NULLS LAST index: we have
+ * nulls_first for backwards scan only.
+ */
+ cpathkey = make_pathkey_from_sortinfo(root,
+ keyCol,
+ NULL,
+ partscheme->partopfamily[i],
+ partscheme->partopcintype[i],
+ partscheme->partcollation[i],
+ ScanDirectionIsBackward(scandir),
+ ScanDirectionIsBackward(scandir),
+ 0,
+ partrel->relids,
+ false);
+
+ if (cpathkey)
+ {
+ /*
+ * We found the sort key in an EquivalenceClass, so it's relevant
+ * for this query. Add it to list, unless it's redundant.
+ */
+ if (!pathkey_is_redundant(cpathkey, retval))
+ retval = lappend(retval, cpathkey);
+ }
+ else
+ {
+ /*
+ * Boolean partition keys might be redundant even if they do not
+ * appear in an EquivalenceClass, because of our special treatment
+ * of boolean equality conditions --- see the comment for
+ * partkey_is_bool_constant_for_query(). If that applies, we can
+ * continue to examine lower-order partition keys. Otherwise, the
+ * sort key is not an interesting sort order for this query, so we
+ * should stop considering partition columns; any lower-order sort
+ * keys won't be useful either.
+ */
+ if (!partkey_is_bool_constant_for_query(partrel, i))
+ {
+ *partialkeys = true;
+ return retval;
+ }
+ }
+ }
+
+ *partialkeys = false;
+ return retval;
+}
+
+/*
+ * build_expression_pathkey
+ * Build a pathkeys list that describes an ordering by a single expression
+ * using the given sort operator.
+ *
+ * expr, nullable_relids, and rel are as for make_pathkey_from_sortinfo.
+ * We induce the other arguments assuming default sort order for the operator.
+ *
+ * Similarly to make_pathkey_from_sortinfo, the result is NIL if create_it
+ * is false and the expression isn't already in some EquivalenceClass.
+ */
+List *
+build_expression_pathkey(PlannerInfo *root,
+ Expr *expr,
+ Relids nullable_relids,
+ Oid opno,
+ Relids rel,
+ bool create_it)
+{
+ List *pathkeys;
+ Oid opfamily,
+ opcintype;
+ int16 strategy;
+ PathKey *cpathkey;
+
+ /* Find the operator in pg_amop --- failure shouldn't happen */
+ if (!get_ordering_op_properties(opno,
+ &opfamily, &opcintype, &strategy))
+ elog(ERROR, "operator %u is not a valid ordering operator",
+ opno);
+
+ cpathkey = make_pathkey_from_sortinfo(root,
+ expr,
+ nullable_relids,
+ opfamily,
+ opcintype,
+ exprCollation((Node *) expr),
+ (strategy == BTGreaterStrategyNumber),
+ (strategy == BTGreaterStrategyNumber),
+ 0,
+ rel,
+ create_it);
+
+ if (cpathkey)
+ pathkeys = list_make1(cpathkey);
+ else
+ pathkeys = NIL;
+
+ return pathkeys;
+}
+
+/*
+ * convert_subquery_pathkeys
+ * Build a pathkeys list that describes the ordering of a subquery's
+ * result, in the terms of the outer query. This is essentially a
+ * task of conversion.
+ *
+ * 'rel': outer query's RelOptInfo for the subquery relation.
+ * 'subquery_pathkeys': the subquery's output pathkeys, in its terms.
+ * 'subquery_tlist': the subquery's output targetlist, in its terms.
+ *
+ * We intentionally don't do truncate_useless_pathkeys() here, because there
+ * are situations where seeing the raw ordering of the subquery is helpful.
+ * For example, if it returns ORDER BY x DESC, that may prompt us to
+ * construct a mergejoin using DESC order rather than ASC order; but the
+ * right_merge_direction heuristic would have us throw the knowledge away.
+ */
+List *
+convert_subquery_pathkeys(PlannerInfo *root, RelOptInfo *rel,
+ List *subquery_pathkeys,
+ List *subquery_tlist)
+{
+ List *retval = NIL;
+ int retvallen = 0;
+ int outer_query_keys = list_length(root->query_pathkeys);
+ ListCell *i;
+
+ foreach(i, subquery_pathkeys)
+ {
+ PathKey *sub_pathkey = (PathKey *) lfirst(i);
+ EquivalenceClass *sub_eclass = sub_pathkey->pk_eclass;
+ PathKey *best_pathkey = NULL;
+
+ if (sub_eclass->ec_has_volatile)
+ {
+ /*
+ * If the sub_pathkey's EquivalenceClass is volatile, then it must
+ * have come from an ORDER BY clause, and we have to match it to
+ * that same targetlist entry.
+ */
+ TargetEntry *tle;
+ Var *outer_var;
+
+ if (sub_eclass->ec_sortref == 0) /* can't happen */
+ elog(ERROR, "volatile EquivalenceClass has no sortref");
+ tle = get_sortgroupref_tle(sub_eclass->ec_sortref, subquery_tlist);
+ Assert(tle);
+ /* Is TLE actually available to the outer query? */
+ outer_var = find_var_for_subquery_tle(rel, tle);
+ if (outer_var)
+ {
+ /* We can represent this sub_pathkey */
+ EquivalenceMember *sub_member;
+ EquivalenceClass *outer_ec;
+
+ Assert(list_length(sub_eclass->ec_members) == 1);
+ sub_member = (EquivalenceMember *) linitial(sub_eclass->ec_members);
+
+ /*
+ * Note: it might look funny to be setting sortref = 0 for a
+ * reference to a volatile sub_eclass. However, the
+ * expression is *not* volatile in the outer query: it's just
+ * a Var referencing whatever the subquery emitted. (IOW, the
+ * outer query isn't going to re-execute the volatile
+ * expression itself.) So this is okay. Likewise, it's
+ * correct to pass nullable_relids = NULL, because we're
+ * underneath any outer joins appearing in the outer query.
+ */
+ outer_ec =
+ get_eclass_for_sort_expr(root,
+ (Expr *) outer_var,
+ NULL,
+ sub_eclass->ec_opfamilies,
+ sub_member->em_datatype,
+ sub_eclass->ec_collation,
+ 0,
+ rel->relids,
+ false);
+
+ /*
+ * If we don't find a matching EC, sub-pathkey isn't
+ * interesting to the outer query
+ */
+ if (outer_ec)
+ best_pathkey =
+ make_canonical_pathkey(root,
+ outer_ec,
+ sub_pathkey->pk_opfamily,
+ sub_pathkey->pk_strategy,
+ sub_pathkey->pk_nulls_first);
+ }
+ }
+ else
+ {
+ /*
+ * Otherwise, the sub_pathkey's EquivalenceClass could contain
+ * multiple elements (representing knowledge that multiple items
+ * are effectively equal). Each element might match none, one, or
+ * more of the output columns that are visible to the outer query.
+ * This means we may have multiple possible representations of the
+ * sub_pathkey in the context of the outer query. Ideally we
+ * would generate them all and put them all into an EC of the
+ * outer query, thereby propagating equality knowledge up to the
+ * outer query. Right now we cannot do so, because the outer
+ * query's EquivalenceClasses are already frozen when this is
+ * called. Instead we prefer the one that has the highest "score"
+ * (number of EC peers, plus one if it matches the outer
+ * query_pathkeys). This is the most likely to be useful in the
+ * outer query.
+ */
+ int best_score = -1;
+ ListCell *j;
+
+ foreach(j, sub_eclass->ec_members)
+ {
+ EquivalenceMember *sub_member = (EquivalenceMember *) lfirst(j);
+ Expr *sub_expr = sub_member->em_expr;
+ Oid sub_expr_type = sub_member->em_datatype;
+ Oid sub_expr_coll = sub_eclass->ec_collation;
+ ListCell *k;
+
+ if (sub_member->em_is_child)
+ continue; /* ignore children here */
+
+ foreach(k, subquery_tlist)
+ {
+ TargetEntry *tle = (TargetEntry *) lfirst(k);
+ Var *outer_var;
+ Expr *tle_expr;
+ EquivalenceClass *outer_ec;
+ PathKey *outer_pk;
+ int score;
+
+ /* Is TLE actually available to the outer query? */
+ outer_var = find_var_for_subquery_tle(rel, tle);
+ if (!outer_var)
+ continue;
+
+ /*
+ * The targetlist entry is considered to match if it
+ * matches after sort-key canonicalization. That is
+ * needed since the sub_expr has been through the same
+ * process.
+ */
+ tle_expr = canonicalize_ec_expression(tle->expr,
+ sub_expr_type,
+ sub_expr_coll);
+ if (!equal(tle_expr, sub_expr))
+ continue;
+
+ /* See if we have a matching EC for the TLE */
+ outer_ec = get_eclass_for_sort_expr(root,
+ (Expr *) outer_var,
+ NULL,
+ sub_eclass->ec_opfamilies,
+ sub_expr_type,
+ sub_expr_coll,
+ 0,
+ rel->relids,
+ false);
+
+ /*
+ * If we don't find a matching EC, this sub-pathkey isn't
+ * interesting to the outer query
+ */
+ if (!outer_ec)
+ continue;
+
+ outer_pk = make_canonical_pathkey(root,
+ outer_ec,
+ sub_pathkey->pk_opfamily,
+ sub_pathkey->pk_strategy,
+ sub_pathkey->pk_nulls_first);
+ /* score = # of equivalence peers */
+ score = list_length(outer_ec->ec_members) - 1;
+ /* +1 if it matches the proper query_pathkeys item */
+ if (retvallen < outer_query_keys &&
+ list_nth(root->query_pathkeys, retvallen) == outer_pk)
+ score++;
+ if (score > best_score)
+ {
+ best_pathkey = outer_pk;
+ best_score = score;
+ }
+ }
+ }
+ }
+
+ /*
+ * If we couldn't find a representation of this sub_pathkey, we're
+ * done (we can't use the ones to its right, either).
+ */
+ if (!best_pathkey)
+ break;
+
+ /*
+ * Eliminate redundant ordering info; could happen if outer query
+ * equivalences subquery keys...
+ */
+ if (!pathkey_is_redundant(best_pathkey, retval))
+ {
+ retval = lappend(retval, best_pathkey);
+ retvallen++;
+ }
+ }
+
+ return retval;
+}
+
+/*
+ * find_var_for_subquery_tle
+ *
+ * If the given subquery tlist entry is due to be emitted by the subquery's
+ * scan node, return a Var for it, else return NULL.
+ *
+ * We need this to ensure that we don't return pathkeys describing values
+ * that are unavailable above the level of the subquery scan.
+ */
+static Var *
+find_var_for_subquery_tle(RelOptInfo *rel, TargetEntry *tle)
+{
+ ListCell *lc;
+
+ /* If the TLE is resjunk, it's certainly not visible to the outer query */
+ if (tle->resjunk)
+ return NULL;
+
+ /* Search the rel's targetlist to see what it will return */
+ foreach(lc, rel->reltarget->exprs)
+ {
+ Var *var = (Var *) lfirst(lc);
+
+ /* Ignore placeholders */
+ if (!IsA(var, Var))
+ continue;
+ Assert(var->varno == rel->relid);
+
+ /* If we find a Var referencing this TLE, we're good */
+ if (var->varattno == tle->resno)
+ return copyObject(var); /* Make a copy for safety */
+ }
+ return NULL;
+}
+
+/*
+ * build_join_pathkeys
+ * Build the path keys for a join relation constructed by mergejoin or
+ * nestloop join. This is normally the same as the outer path's keys.
+ *
+ * EXCEPTION: in a FULL or RIGHT join, we cannot treat the result as
+ * having the outer path's path keys, because null lefthand rows may be
+ * inserted at random points. It must be treated as unsorted.
+ *
+ * We truncate away any pathkeys that are uninteresting for higher joins.
+ *
+ * 'joinrel' is the join relation that paths are being formed for
+ * 'jointype' is the join type (inner, left, full, etc)
+ * 'outer_pathkeys' is the list of the current outer path's path keys
+ *
+ * Returns the list of new path keys.
+ */
+List *
+build_join_pathkeys(PlannerInfo *root,
+ RelOptInfo *joinrel,
+ JoinType jointype,
+ List *outer_pathkeys)
+{
+ if (jointype == JOIN_FULL || jointype == JOIN_RIGHT)
+ return NIL;
+
+ /*
+ * This used to be quite a complex bit of code, but now that all pathkey
+ * sublists start out life canonicalized, we don't have to do a darn thing
+ * here!
+ *
+ * We do, however, need to truncate the pathkeys list, since it may
+ * contain pathkeys that were useful for forming this joinrel but are
+ * uninteresting to higher levels.
+ */
+ return truncate_useless_pathkeys(root, joinrel, outer_pathkeys);
+}
+
+/****************************************************************************
+ * PATHKEYS AND SORT CLAUSES
+ ****************************************************************************/
+
+/*
+ * make_pathkeys_for_sortclauses
+ * Generate a pathkeys list that represents the sort order specified
+ * by a list of SortGroupClauses
+ *
+ * The resulting PathKeys are always in canonical form. (Actually, there
+ * is no longer any code anywhere that creates non-canonical PathKeys.)
+ *
+ * We assume that root->nullable_baserels is the set of base relids that could
+ * have gone to NULL below the SortGroupClause expressions. This is okay if
+ * the expressions came from the query's top level (ORDER BY, DISTINCT, etc)
+ * and if this function is only invoked after deconstruct_jointree. In the
+ * future we might have to make callers pass in the appropriate
+ * nullable-relids set, but for now it seems unnecessary.
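+ *
+ * For example (a hypothetical query, shown only for illustration):
+ *		SELECT x, y FROM t ORDER BY x DESC NULLS LAST, y;
+ * produces two pathkeys, one for x with descending order and nulls last,
+ * and one for y with the default ascending, nulls-last ordering (assuming
+ * no equivalence makes the second key redundant).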
+ *
+ * 'sortclauses' is a list of SortGroupClause nodes
+ * 'tlist' is the targetlist to find the referenced tlist entries in
+ */
+List *
+make_pathkeys_for_sortclauses(PlannerInfo *root,
+ List *sortclauses,
+ List *tlist)
+{
+ List *pathkeys = NIL;
+ ListCell *l;
+
+ foreach(l, sortclauses)
+ {
+ SortGroupClause *sortcl = (SortGroupClause *) lfirst(l);
+ Expr *sortkey;
+ PathKey *pathkey;
+
+ sortkey = (Expr *) get_sortgroupclause_expr(sortcl, tlist);
+ Assert(OidIsValid(sortcl->sortop));
+ pathkey = make_pathkey_from_sortop(root,
+ sortkey,
+ root->nullable_baserels,
+ sortcl->sortop,
+ sortcl->nulls_first,
+ sortcl->tleSortGroupRef,
+ true);
+
+ /* Canonical form eliminates redundant ordering keys */
+ if (!pathkey_is_redundant(pathkey, pathkeys))
+ pathkeys = lappend(pathkeys, pathkey);
+ }
+ return pathkeys;
+}
+
+/****************************************************************************
+ * PATHKEYS AND MERGECLAUSES
+ ****************************************************************************/
+
+/*
+ * initialize_mergeclause_eclasses
+ * Set the EquivalenceClass links in a mergeclause restrictinfo.
+ *
+ * RestrictInfo contains fields in which we may cache pointers to
+ * EquivalenceClasses for the left and right inputs of the mergeclause.
+ * (If the mergeclause is a true equivalence clause these will be the
+ * same EquivalenceClass, otherwise not.) If the mergeclause is either
+ * used to generate an EquivalenceClass, or derived from an EquivalenceClass,
+ * then it's easy to set up the left_ec and right_ec members --- otherwise,
+ * this function should be called to set them up. We will generate new
+ * EquivalenceClasses if necessary to represent the mergeclause's left and
+ * right sides.
+ *
+ * Note this is called before EC merging is complete, so the links won't
+ * necessarily point to canonical ECs. Before they are actually used for
+ * anything, update_mergeclause_eclasses must be called to ensure that
+ * they've been updated to point to canonical ECs.
+ */
+void
+initialize_mergeclause_eclasses(PlannerInfo *root, RestrictInfo *restrictinfo)
+{
+ Expr *clause = restrictinfo->clause;
+ Oid lefttype,
+ righttype;
+
+ /* Should be a mergeclause ... */
+ Assert(restrictinfo->mergeopfamilies != NIL);
+ /* ... with links not yet set */
+ Assert(restrictinfo->left_ec == NULL);
+ Assert(restrictinfo->right_ec == NULL);
+
+ /* Need the declared input types of the operator */
+ op_input_types(((OpExpr *) clause)->opno, &lefttype, &righttype);
+
+ /* Find or create a matching EquivalenceClass for each side */
+ restrictinfo->left_ec =
+ get_eclass_for_sort_expr(root,
+ (Expr *) get_leftop(clause),
+ restrictinfo->nullable_relids,
+ restrictinfo->mergeopfamilies,
+ lefttype,
+ ((OpExpr *) clause)->inputcollid,
+ 0,
+ NULL,
+ true);
+ restrictinfo->right_ec =
+ get_eclass_for_sort_expr(root,
+ (Expr *) get_rightop(clause),
+ restrictinfo->nullable_relids,
+ restrictinfo->mergeopfamilies,
+ righttype,
+ ((OpExpr *) clause)->inputcollid,
+ 0,
+ NULL,
+ true);
+}
+
+/*
+ * update_mergeclause_eclasses
+ * Make the cached EquivalenceClass links valid in a mergeclause
+ * restrictinfo.
+ *
+ * These pointers should have been set by process_equivalence or
+ * initialize_mergeclause_eclasses, but they might have been set to
+ * non-canonical ECs that got merged later. Chase up to the canonical
+ * merged parent if so.
+ */
+void
+update_mergeclause_eclasses(PlannerInfo *root, RestrictInfo *restrictinfo)
+{
+ /* Should be a merge clause ... */
+ Assert(restrictinfo->mergeopfamilies != NIL);
+ /* ... with pointers already set */
+ Assert(restrictinfo->left_ec != NULL);
+ Assert(restrictinfo->right_ec != NULL);
+
+ /* Chase up to the top as needed */
+ while (restrictinfo->left_ec->ec_merged)
+ restrictinfo->left_ec = restrictinfo->left_ec->ec_merged;
+ while (restrictinfo->right_ec->ec_merged)
+ restrictinfo->right_ec = restrictinfo->right_ec->ec_merged;
+}
+
+/*
+ * find_mergeclauses_for_outer_pathkeys
+ * This routine attempts to find a list of mergeclauses that can be
+ * used with a specified ordering for the join's outer relation.
+ * If successful, it returns a list of mergeclauses.
+ *
+ * 'pathkeys' is a pathkeys list showing the ordering of an outer-rel path.
+ * 'restrictinfos' is a list of mergejoinable restriction clauses for the
+ * join relation being formed, in no particular order.
+ *
+ * The restrictinfos must be marked (via outer_is_left) to show which side
+ * of each clause is associated with the current outer path. (See
+ * select_mergejoin_clauses())
+ *
+ * The result is NIL if no merge can be done, else a maximal list of
+ * usable mergeclauses (represented as a list of their restrictinfo nodes).
+ * The list is ordered to match the pathkeys, as required for execution.
+ */
+List *
+find_mergeclauses_for_outer_pathkeys(PlannerInfo *root,
+ List *pathkeys,
+ List *restrictinfos)
+{
+ List *mergeclauses = NIL;
+ ListCell *i;
+
+ /* make sure we have eclasses cached in the clauses */
+ foreach(i, restrictinfos)
+ {
+ RestrictInfo *rinfo = (RestrictInfo *) lfirst(i);
+
+ update_mergeclause_eclasses(root, rinfo);
+ }
+
+ foreach(i, pathkeys)
+ {
+ PathKey *pathkey = (PathKey *) lfirst(i);
+ EquivalenceClass *pathkey_ec = pathkey->pk_eclass;
+ List *matched_restrictinfos = NIL;
+ ListCell *j;
+
+ /*----------
+ * A mergejoin clause matches a pathkey if it has the same EC.
+ * If there are multiple matching clauses, take them all. In plain
+ * inner-join scenarios we expect only one match, because
+ * equivalence-class processing will have removed any redundant
+ * mergeclauses. However, in outer-join scenarios there might be
+ * multiple matches. An example is
+ *
+ * select * from a full join b
+ * on a.v1 = b.v1 and a.v2 = b.v2 and a.v1 = b.v2;
+ *
+ * Given the pathkeys ({a.v1}, {a.v2}) it is okay to return all three
+ * clauses (in the order a.v1=b.v1, a.v1=b.v2, a.v2=b.v2) and indeed
+ * we *must* do so or we will be unable to form a valid plan.
+ *
+ * We expect that the given pathkeys list is canonical, which means
+ * no two members have the same EC, so it's not possible for this
+ * code to enter the same mergeclause into the result list twice.
+ *
+ * It's possible that multiple matching clauses might have different
+ * ECs on the other side, in which case the order we put them into our
+ * result makes a difference in the pathkeys required for the inner
+ * input rel. However this routine hasn't got any info about which
+ * order would be best, so we don't worry about that.
+ *
+ * It's also possible that the selected mergejoin clauses produce
+ * a noncanonical ordering of pathkeys for the inner side, ie, we
+ * might select clauses that reference b.v1, b.v2, b.v1 in that
+ * order. This is not harmful in itself, though it suggests that
+ * the clauses are partially redundant. Since the alternative is
+ * to omit mergejoin clauses and thereby possibly fail to generate a
+ * plan altogether, we live with it. make_inner_pathkeys_for_merge()
+ * has to delete duplicates when it constructs the inner pathkeys
+ * list, and we also have to deal with such cases specially in
+ * create_mergejoin_plan().
+ *----------
+ */
+ foreach(j, restrictinfos)
+ {
+ RestrictInfo *rinfo = (RestrictInfo *) lfirst(j);
+ EquivalenceClass *clause_ec;
+
+ clause_ec = rinfo->outer_is_left ?
+ rinfo->left_ec : rinfo->right_ec;
+ if (clause_ec == pathkey_ec)
+ matched_restrictinfos = lappend(matched_restrictinfos, rinfo);
+ }
+
+ /*
+ * If we didn't find a mergeclause, we're done --- any additional
+ * sort-key positions in the pathkeys are useless. (But we can still
+ * mergejoin if we found at least one mergeclause.)
+ */
+ if (matched_restrictinfos == NIL)
+ break;
+
+ /*
+ * If we did find usable mergeclause(s) for this sort-key position,
+ * add them to result list.
+ */
+ mergeclauses = list_concat(mergeclauses, matched_restrictinfos);
+ }
+
+ return mergeclauses;
+}
+
+/*
+ * select_outer_pathkeys_for_merge
+ * Builds a pathkey list representing a possible sort ordering
+ * that can be used with the given mergeclauses.
+ *
+ * 'mergeclauses' is a list of RestrictInfos for mergejoin clauses
+ * that will be used in a merge join.
+ * 'joinrel' is the join relation we are trying to construct.
+ *
+ * The restrictinfos must be marked (via outer_is_left) to show which side
+ * of each clause is associated with the current outer path. (See
+ * select_mergejoin_clauses())
+ *
+ * Returns a pathkeys list that can be applied to the outer relation.
+ *
+ * Since we assume here that a sort is required, there is no particular use
+ * in matching any available ordering of the outerrel. (joinpath.c has an
+ * entirely separate code path for considering sort-free mergejoins.) Rather,
+ * it's interesting to try to match the requested query_pathkeys so that a
+ * second output sort may be avoided; and failing that, we try to list "more
+ * popular" keys (those with the most unmatched EquivalenceClass peers)
+ * earlier, in hopes of making the resulting ordering useful for as many
+ * higher-level mergejoins as possible.
+ */
+List *
+select_outer_pathkeys_for_merge(PlannerInfo *root,
+ List *mergeclauses,
+ RelOptInfo *joinrel)
+{
+ List *pathkeys = NIL;
+ int nClauses = list_length(mergeclauses);
+ EquivalenceClass **ecs;
+ int *scores;
+ int necs;
+ ListCell *lc;
+ int j;
+
+ /* Might have no mergeclauses */
+ if (nClauses == 0)
+ return NIL;
+
+ /*
+ * Make arrays of the ECs used by the mergeclauses (dropping any
+ * duplicates) and their "popularity" scores.
+ */
+ ecs = (EquivalenceClass **) palloc(nClauses * sizeof(EquivalenceClass *));
+ scores = (int *) palloc(nClauses * sizeof(int));
+ necs = 0;
+
+ foreach(lc, mergeclauses)
+ {
+ RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
+ EquivalenceClass *oeclass;
+ int score;
+ ListCell *lc2;
+
+ /* get the outer eclass */
+ update_mergeclause_eclasses(root, rinfo);
+
+ if (rinfo->outer_is_left)
+ oeclass = rinfo->left_ec;
+ else
+ oeclass = rinfo->right_ec;
+
+ /* reject duplicates */
+ for (j = 0; j < necs; j++)
+ {
+ if (ecs[j] == oeclass)
+ break;
+ }
+ if (j < necs)
+ continue;
+
+ /* compute score */
+ score = 0;
+ foreach(lc2, oeclass->ec_members)
+ {
+ EquivalenceMember *em = (EquivalenceMember *) lfirst(lc2);
+
+ /* Potential future join partner? */
+ if (!em->em_is_const && !em->em_is_child &&
+ !bms_overlap(em->em_relids, joinrel->relids))
+ score++;
+ }
+
+ ecs[necs] = oeclass;
+ scores[necs] = score;
+ necs++;
+ }
+
+ /*
+ * Find out if we have all the ECs mentioned in query_pathkeys; if so we
+ * can generate a sort order that's also useful for final output. There is
+ * no percentage in a partial match, though, so we have to have 'em all.
+ */
+ if (root->query_pathkeys)
+ {
+ foreach(lc, root->query_pathkeys)
+ {
+ PathKey *query_pathkey = (PathKey *) lfirst(lc);
+ EquivalenceClass *query_ec = query_pathkey->pk_eclass;
+
+ for (j = 0; j < necs; j++)
+ {
+ if (ecs[j] == query_ec)
+ break; /* found match */
+ }
+ if (j >= necs)
+ break; /* didn't find match */
+ }
+ /* if we got to the end of the list, we have them all */
+ if (lc == NULL)
+ {
+ /* copy query_pathkeys as starting point for our output */
+ pathkeys = list_copy(root->query_pathkeys);
+ /* mark their ECs as already-emitted */
+ foreach(lc, root->query_pathkeys)
+ {
+ PathKey *query_pathkey = (PathKey *) lfirst(lc);
+ EquivalenceClass *query_ec = query_pathkey->pk_eclass;
+
+ for (j = 0; j < necs; j++)
+ {
+ if (ecs[j] == query_ec)
+ {
+ scores[j] = -1;
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ /*
+ * Add remaining ECs to the list in popularity order, using a default sort
+ * ordering. (We could use qsort() here, but the list length is usually
+ * so small it's not worth it.)
+ */
+ for (;;)
+ {
+ int best_j;
+ int best_score;
+ EquivalenceClass *ec;
+ PathKey *pathkey;
+
+ best_j = 0;
+ best_score = scores[0];
+ for (j = 1; j < necs; j++)
+ {
+ if (scores[j] > best_score)
+ {
+ best_j = j;
+ best_score = scores[j];
+ }
+ }
+ if (best_score < 0)
+ break; /* all done */
+ ec = ecs[best_j];
+ scores[best_j] = -1;
+ pathkey = make_canonical_pathkey(root,
+ ec,
+ linitial_oid(ec->ec_opfamilies),
+ BTLessStrategyNumber,
+ false);
+ /* can't be redundant because no duplicate ECs */
+ Assert(!pathkey_is_redundant(pathkey, pathkeys));
+ pathkeys = lappend(pathkeys, pathkey);
+ }
+
+ pfree(ecs);
+ pfree(scores);
+
+ return pathkeys;
+}
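+
+/*
+ * Illustrative sketch (hypothetical tables a, b, c, not drawn from any
+ * particular test case): suppose we are forming the join of a and b with
+ * mergeclauses a.x = b.x and a.y = b.y, the query also equates a.y with
+ * c.y, and there is no ORDER BY.  The equivalence class containing a.y
+ * then has one member (c.y) outside the a/b joinrel and scores 1, while
+ * the class containing a.x scores 0, so we emit the a.y pathkey before
+ * the a.x pathkey, in hopes that the resulting sort order can be reused
+ * by a later mergejoin against c.
+ */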
+
+/*
+ * make_inner_pathkeys_for_merge
+ * Builds a pathkey list representing the explicit sort order that
+ * must be applied to an inner path to make it usable with the
+ * given mergeclauses.
+ *
+ * 'mergeclauses' is a list of RestrictInfos for the mergejoin clauses
+ * that will be used in a merge join, in order.
+ * 'outer_pathkeys' are the already-known canonical pathkeys for the outer
+ * side of the join.
+ *
+ * The restrictinfos must be marked (via outer_is_left) to show which side
+ * of each clause is associated with the current outer path. (See
+ * select_mergejoin_clauses())
+ *
+ * Returns a pathkeys list that can be applied to the inner relation.
+ *
+ * Note that it is not this routine's job to decide whether sorting is
+ * actually needed for a particular input path. Assume a sort is necessary;
+ * just make the keys, eh?
+ */
+List *
+make_inner_pathkeys_for_merge(PlannerInfo *root,
+ List *mergeclauses,
+ List *outer_pathkeys)
+{
+ List *pathkeys = NIL;
+ EquivalenceClass *lastoeclass;
+ PathKey *opathkey;
+ ListCell *lc;
+ ListCell *lop;
+
+ lastoeclass = NULL;
+ opathkey = NULL;
+ lop = list_head(outer_pathkeys);
+
+ foreach(lc, mergeclauses)
+ {
+ RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
+ EquivalenceClass *oeclass;
+ EquivalenceClass *ieclass;
+ PathKey *pathkey;
+
+ update_mergeclause_eclasses(root, rinfo);
+
+ if (rinfo->outer_is_left)
+ {
+ oeclass = rinfo->left_ec;
+ ieclass = rinfo->right_ec;
+ }
+ else
+ {
+ oeclass = rinfo->right_ec;
+ ieclass = rinfo->left_ec;
+ }
+
+ /* outer eclass should match current or next pathkeys */
+ /* we check this carefully for debugging reasons */
+ if (oeclass != lastoeclass)
+ {
+ if (!lop)
+ elog(ERROR, "too few pathkeys for mergeclauses");
+ opathkey = (PathKey *) lfirst(lop);
+ lop = lnext(outer_pathkeys, lop);
+ lastoeclass = opathkey->pk_eclass;
+ if (oeclass != lastoeclass)
+ elog(ERROR, "outer pathkeys do not match mergeclause");
+ }
+
+ /*
+ * Often, we'll have same EC on both sides, in which case the outer
+ * pathkey is also canonical for the inner side, and we can skip a
+ * useless search.
+ */
+ if (ieclass == oeclass)
+ pathkey = opathkey;
+ else
+ pathkey = make_canonical_pathkey(root,
+ ieclass,
+ opathkey->pk_opfamily,
+ opathkey->pk_strategy,
+ opathkey->pk_nulls_first);
+
+ /*
+ * Don't generate redundant pathkeys (which can happen if multiple
+ * mergeclauses refer to the same EC). Because we do this, the output
+ * pathkey list isn't necessarily ordered like the mergeclauses, which
+ * complicates life for create_mergejoin_plan(). But if we didn't,
+ * we'd have a noncanonical sort key list, which would be bad; for one
+ * reason, it certainly wouldn't match any available sort order for
+ * the input relation.
+ */
+ if (!pathkey_is_redundant(pathkey, pathkeys))
+ pathkeys = lappend(pathkeys, pathkey);
+ }
+
+ return pathkeys;
+}
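+
+/*
+ * Worked example, reusing the hypothetical full-join query shown in
+ * find_mergeclauses_for_outer_pathkeys: given the mergeclauses
+ * a.v1 = b.v1, a.v1 = b.v2, a.v2 = b.v2 and outer pathkeys
+ * ({a.v1}, {a.v2}), the inner-side equivalence classes encountered are
+ * b.v1, b.v2, b.v2, so the result is ({b.v1}, {b.v2}); the duplicate
+ * {b.v2} entry is dropped by the redundancy check above.
+ */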
+
+/*
+ * trim_mergeclauses_for_inner_pathkeys
+ * This routine trims a list of mergeclauses to include just those that
+ * work with a specified ordering for the join's inner relation.
+ *
+ * 'mergeclauses' is a list of RestrictInfos for mergejoin clauses for the
+ * join relation being formed, in an order known to work for the
+ * currently-considered sort ordering of the join's outer rel.
+ * 'pathkeys' is a pathkeys list showing the ordering of an inner-rel path;
+ * it should be equal to, or a truncation of, the result of
+ * make_inner_pathkeys_for_merge for these mergeclauses.
+ *
+ * What we return will be a prefix of the given mergeclauses list.
+ *
+ * We need this logic because make_inner_pathkeys_for_merge's result isn't
+ * necessarily in the same order as the mergeclauses. That means that if we
+ * consider an inner-rel pathkey list that is a truncation of that result,
+ * we might need to drop mergeclauses even though they match a surviving inner
+ * pathkey. This happens when they are to the right of a mergeclause that
+ * matches a removed inner pathkey.
+ *
+ * The mergeclauses must be marked (via outer_is_left) to show which side
+ * of each clause is associated with the current outer path. (See
+ * select_mergejoin_clauses())
+ */
+List *
+trim_mergeclauses_for_inner_pathkeys(PlannerInfo *root,
+ List *mergeclauses,
+ List *pathkeys)
+{
+ List *new_mergeclauses = NIL;
+ PathKey *pathkey;
+ EquivalenceClass *pathkey_ec;
+ bool matched_pathkey;
+ ListCell *lip;
+ ListCell *i;
+
+ /* No pathkeys => no mergeclauses (though we don't expect this case) */
+ if (pathkeys == NIL)
+ return NIL;
+ /* Initialize to consider first pathkey */
+ lip = list_head(pathkeys);
+ pathkey = (PathKey *) lfirst(lip);
+ pathkey_ec = pathkey->pk_eclass;
+ lip = lnext(pathkeys, lip);
+ matched_pathkey = false;
+
+ /* Scan mergeclauses to see how many we can use */
+ foreach(i, mergeclauses)
+ {
+ RestrictInfo *rinfo = (RestrictInfo *) lfirst(i);
+ EquivalenceClass *clause_ec;
+
+ /* Assume we needn't do update_mergeclause_eclasses again here */
+
+ /* Check clause's inner-rel EC against current pathkey */
+ clause_ec = rinfo->outer_is_left ?
+ rinfo->right_ec : rinfo->left_ec;
+
+ /* If we don't have a match, attempt to advance to next pathkey */
+ if (clause_ec != pathkey_ec)
+ {
+ /* If we had no clauses matching this inner pathkey, must stop */
+ if (!matched_pathkey)
+ break;
+
+ /* Advance to next inner pathkey, if any */
+ if (lip == NULL)
+ break;
+ pathkey = (PathKey *) lfirst(lip);
+ pathkey_ec = pathkey->pk_eclass;
+ lip = lnext(pathkeys, lip);
+ matched_pathkey = false;
+ }
+
+ /* If mergeclause matches current inner pathkey, we can use it */
+ if (clause_ec == pathkey_ec)
+ {
+ new_mergeclauses = lappend(new_mergeclauses, rinfo);
+ matched_pathkey = true;
+ }
+ else
+ {
+ /* Else, no hope of adding any more mergeclauses */
+ break;
+ }
+ }
+
+ return new_mergeclauses;
+}
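+
+/*
+ * Continuing the hypothetical full-join example used above: mergeclauses
+ * a.v1 = b.v1, a.v1 = b.v2, a.v2 = b.v2 produce inner pathkeys
+ * ({b.v1}, {b.v2}).  For an inner path sorted only by b.v1 (a truncation
+ * of that list), the scan keeps a.v1 = b.v1, then stops at a.v1 = b.v2
+ * because there is no further inner pathkey to advance to, so only the
+ * first mergeclause survives the trim.
+ */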
+
+
+/****************************************************************************
+ * PATHKEY USEFULNESS CHECKS
+ *
+ * We only want to remember as many of the pathkeys of a path as have some
+ * potential use, either for subsequent mergejoins or for meeting the query's
+ * requested output ordering. This ensures that add_path() won't consider
+ * a path to have a usefully different ordering unless it really is useful.
+ * These routines check for usefulness of given pathkeys.
+ ****************************************************************************/
+
+/*
+ * pathkeys_useful_for_merging
+ * Count the number of pathkeys that may be useful for mergejoins
+ * above the given relation.
+ *
+ * We consider a pathkey potentially useful if it corresponds to the merge
+ * ordering of either side of any joinclause for the rel. This might be
+ * overoptimistic, since joinclauses that require different other relations
+ * might never be usable at the same time, but trying to be exact is likely
+ * to be more trouble than it's worth.
+ *
+ * To avoid doubling the number of mergejoin paths considered, we would like
+ * to consider only one of the two scan directions (ASC or DESC) as useful
+ * for merging for any given target column. The choice is arbitrary unless
+ * one of the directions happens to match an ORDER BY key, in which case
+ * that direction should be preferred, in hopes of avoiding a final sort step.
+ * right_merge_direction() implements this heuristic.
+ */
+static int
+pathkeys_useful_for_merging(PlannerInfo *root, RelOptInfo *rel, List *pathkeys)
+{
+ int useful = 0;
+ ListCell *i;
+
+ foreach(i, pathkeys)
+ {
+ PathKey *pathkey = (PathKey *) lfirst(i);
+ bool matched = false;
+ ListCell *j;
+
+ /* If "wrong" direction, not useful for merging */
+ if (!right_merge_direction(root, pathkey))
+ break;
+
+ /*
+ * First look into the EquivalenceClass of the pathkey, to see if
+ * there are any members not yet joined to the rel. If so, it's
+ * surely possible to generate a mergejoin clause using them.
+ */
+ if (rel->has_eclass_joins &&
+ eclass_useful_for_merging(root, pathkey->pk_eclass, rel))
+ matched = true;
+ else
+ {
+ /*
+ * Otherwise search the rel's joininfo list, which contains
+ * non-EquivalenceClass-derivable join clauses that might
+ * nonetheless be mergejoinable.
+ */
+ foreach(j, rel->joininfo)
+ {
+ RestrictInfo *restrictinfo = (RestrictInfo *) lfirst(j);
+
+ if (restrictinfo->mergeopfamilies == NIL)
+ continue;
+ update_mergeclause_eclasses(root, restrictinfo);
+
+ if (pathkey->pk_eclass == restrictinfo->left_ec ||
+ pathkey->pk_eclass == restrictinfo->right_ec)
+ {
+ matched = true;
+ break;
+ }
+ }
+ }
+
+ /*
+ * If we didn't find a mergeclause, we're done --- any additional
+ * sort-key positions in the pathkeys are useless. (But we can still
+ * mergejoin if we found at least one mergeclause.)
+ */
+ if (matched)
+ useful++;
+ else
+ break;
+ }
+
+ return useful;
+}
+
+/*
+ * right_merge_direction
+ * Check whether the pathkey embodies the preferred sort direction
+ * for merging its target column.
+ */
+static bool
+right_merge_direction(PlannerInfo *root, PathKey *pathkey)
+{
+ ListCell *l;
+
+ foreach(l, root->query_pathkeys)
+ {
+ PathKey *query_pathkey = (PathKey *) lfirst(l);
+
+ if (pathkey->pk_eclass == query_pathkey->pk_eclass &&
+ pathkey->pk_opfamily == query_pathkey->pk_opfamily)
+ {
+ /*
+ * Found a matching query sort column. Prefer this pathkey's
+ * direction iff it matches. Note that we ignore pk_nulls_first,
+ * which means that a sort might be needed anyway ... but we still
+ * want to prefer only one of the two possible directions, and we
+ * might as well use this one.
+ */
+ return (pathkey->pk_strategy == query_pathkey->pk_strategy);
+ }
+ }
+
+ /* If no matching ORDER BY request, prefer the ASC direction */
+ return (pathkey->pk_strategy == BTLessStrategyNumber);
+}
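+
+/*
+ * For example (hypothetical): given ORDER BY x DESC, the descending
+ * (BTGreaterStrategyNumber) pathkey on x's equivalence class is the
+ * preferred merge direction, while columns not mentioned in the ORDER BY
+ * default to the ascending direction.
+ */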
+
+/*
+ * pathkeys_useful_for_ordering
+ * Count the number of pathkeys that are useful for meeting the
+ * query's requested output ordering.
+ *
+ * Because we have the possibility of incremental sort, a path whose
+ * pathkeys match only a prefix of the requested ordering is still
+ * potentially useful for speeding up that ordering.  Thus we return 0 if
+ * no useful keys are found, or else the number of leading keys shared by
+ * the given list and the requested ordering.
+ */
+static int
+pathkeys_useful_for_ordering(PlannerInfo *root, List *pathkeys)
+{
+ int n_common_pathkeys;
+
+ if (root->query_pathkeys == NIL)
+ return 0; /* no special ordering requested */
+
+ if (pathkeys == NIL)
+ return 0; /* unordered path */
+
+ (void) pathkeys_count_contained_in(root->query_pathkeys, pathkeys,
+ &n_common_pathkeys);
+
+ return n_common_pathkeys;
+}
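+
+/*
+ * For example (hypothetical): with ORDER BY x, y the query_pathkeys are
+ * ({x}, {y}).  A path already sorted by ({x}) shares one leading key and
+ * returns 1, since an incremental sort on y can finish the ordering,
+ * whereas a path sorted only by ({y}) shares no leading key and returns 0.
+ */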
+
+/*
+ * truncate_useless_pathkeys
+ * Shorten the given pathkey list to just the useful pathkeys.
+ */
+List *
+truncate_useless_pathkeys(PlannerInfo *root,
+ RelOptInfo *rel,
+ List *pathkeys)
+{
+ int nuseful;
+ int nuseful2;
+
+ nuseful = pathkeys_useful_for_merging(root, rel, pathkeys);
+ nuseful2 = pathkeys_useful_for_ordering(root, pathkeys);
+ if (nuseful2 > nuseful)
+ nuseful = nuseful2;
+
+ /*
+ * Note: not safe to modify input list destructively, but we can avoid
+ * copying the list if we're not actually going to change it
+ */
+ if (nuseful == 0)
+ return NIL;
+ else if (nuseful == list_length(pathkeys))
+ return pathkeys;
+ else
+ return list_truncate(list_copy(pathkeys), nuseful);
+}
+
+/*
+ * has_useful_pathkeys
+ * Detect whether the specified rel could have any pathkeys that are
+ * useful according to truncate_useless_pathkeys().
+ *
+ * This is a cheap test that lets us skip building pathkeys at all in very
+ * simple queries. It's OK to err in the direction of returning "true" when
+ * there really aren't any usable pathkeys, but erring in the other direction
+ * is bad --- so keep this in sync with the routines above!
+ *
+ * We could make the test more complex, for example checking to see if any of
+ * the joinclauses are really mergejoinable, but that likely wouldn't win
+ * often enough to repay the extra cycles. Queries with neither a join nor
+ * a sort are reasonably common, though, so this much work seems worthwhile.
+ */
+bool
+has_useful_pathkeys(PlannerInfo *root, RelOptInfo *rel)
+{
+ if (rel->joininfo != NIL || rel->has_eclass_joins)
+ return true; /* might be able to use pathkeys for merging */
+ if (root->query_pathkeys != NIL)
+ return true; /* might be able to use them for ordering */
+ return false; /* definitely useless */
+}
diff --git a/src/backend/optimizer/path/tidpath.c b/src/backend/optimizer/path/tidpath.c
new file mode 100644
index 0000000..279ca1f
--- /dev/null
+++ b/src/backend/optimizer/path/tidpath.c
@@ -0,0 +1,528 @@
+/*-------------------------------------------------------------------------
+ *
+ * tidpath.c
+ * Routines to determine which TID conditions are usable for scanning
+ * a given relation, and create TidPaths and TidRangePaths accordingly.
+ *
+ * For TidPaths, we look for WHERE conditions of the form
+ * "CTID = pseudoconstant", which can be implemented by just fetching
+ * the tuple directly via heap_fetch(). We can also handle OR'd conditions
+ * such as (CTID = const1) OR (CTID = const2), as well as ScalarArrayOpExpr
+ * conditions of the form CTID = ANY(pseudoconstant_array). In particular
+ * this allows
+ * WHERE ctid IN (tid1, tid2, ...)
+ *
+ * As with indexscans, our definition of "pseudoconstant" is pretty liberal:
+ * we allow anything that doesn't involve a volatile function or a Var of
+ * the relation under consideration. Vars belonging to other relations of
+ * the query are allowed, giving rise to parameterized TID scans.
+ *
+ * We also support "WHERE CURRENT OF cursor" conditions (CurrentOfExpr),
+ * which amount to "CTID = run-time-determined-TID". These could in
+ * theory be translated to a simple comparison of CTID to the result of
+ * a function, but in practice it works better to keep the special node
+ * representation all the way through to execution.
+ *
+ * Additionally, TidRangePaths may be created for conditions of the form
+ * "CTID relop pseudoconstant", where relop is one of >,>=,<,<=, and
+ * AND-clauses composed of such conditions.
+ *
+ * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ * src/backend/optimizer/path/tidpath.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "access/sysattr.h"
+#include "catalog/pg_operator.h"
+#include "catalog/pg_type.h"
+#include "nodes/nodeFuncs.h"
+#include "optimizer/clauses.h"
+#include "optimizer/optimizer.h"
+#include "optimizer/pathnode.h"
+#include "optimizer/paths.h"
+#include "optimizer/restrictinfo.h"
+
+
+/*
+ * Does this Var represent the CTID column of the specified baserel?
+ */
+static inline bool
+IsCTIDVar(Var *var, RelOptInfo *rel)
+{
+ /* The vartype check is strictly paranoia */
+ if (var->varattno == SelfItemPointerAttributeNumber &&
+ var->vartype == TIDOID &&
+ var->varno == rel->relid &&
+ var->varlevelsup == 0)
+ return true;
+ return false;
+}
+
+/*
+ * Check to see if a RestrictInfo is of the form
+ * CTID OP pseudoconstant
+ * or
+ * pseudoconstant OP CTID
+ * where OP is a binary operation, the CTID Var belongs to relation "rel",
+ * and nothing on the other side of the clause does.
+ */
+static bool
+IsBinaryTidClause(RestrictInfo *rinfo, RelOptInfo *rel)
+{
+ OpExpr *node;
+ Node *arg1,
+ *arg2,
+ *other;
+ Relids other_relids;
+
+ /* Must be an OpExpr */
+ if (!is_opclause(rinfo->clause))
+ return false;
+ node = (OpExpr *) rinfo->clause;
+
+ /* OpExpr must have two arguments */
+ if (list_length(node->args) != 2)
+ return false;
+ arg1 = linitial(node->args);
+ arg2 = lsecond(node->args);
+
+ /* Look for CTID as either argument */
+ other = NULL;
+ other_relids = NULL;
+ if (arg1 && IsA(arg1, Var) &&
+ IsCTIDVar((Var *) arg1, rel))
+ {
+ other = arg2;
+ other_relids = rinfo->right_relids;
+ }
+ if (!other && arg2 && IsA(arg2, Var) &&
+ IsCTIDVar((Var *) arg2, rel))
+ {
+ other = arg1;
+ other_relids = rinfo->left_relids;
+ }
+ if (!other)
+ return false;
+
+ /* The other argument must be a pseudoconstant */
+ if (bms_is_member(rel->relid, other_relids) ||
+ contain_volatile_functions(other))
+ return false;
+
+ return true; /* success */
+}
+
+/*
+ * Check to see if a RestrictInfo is of the form
+ * CTID = pseudoconstant
+ * or
+ * pseudoconstant = CTID
+ * where the CTID Var belongs to relation "rel", and nothing on the
+ * other side of the clause does.
+ */
+static bool
+IsTidEqualClause(RestrictInfo *rinfo, RelOptInfo *rel)
+{
+ if (!IsBinaryTidClause(rinfo, rel))
+ return false;
+
+ if (((OpExpr *) rinfo->clause)->opno == TIDEqualOperator)
+ return true;
+
+ return false;
+}
+
+/*
+ * Check to see if a RestrictInfo is of the form
+ * CTID OP pseudoconstant
+ * or
+ * pseudoconstant OP CTID
+ * where OP is a range operator such as <, <=, >, or >=, the CTID Var belongs
+ * to relation "rel", and nothing on the other side of the clause does.
+ */
+static bool
+IsTidRangeClause(RestrictInfo *rinfo, RelOptInfo *rel)
+{
+ Oid opno;
+
+ if (!IsBinaryTidClause(rinfo, rel))
+ return false;
+ opno = ((OpExpr *) rinfo->clause)->opno;
+
+ if (opno == TIDLessOperator || opno == TIDLessEqOperator ||
+ opno == TIDGreaterOperator || opno == TIDGreaterEqOperator)
+ return true;
+
+ return false;
+}
+
+/*
+ * Check to see if a RestrictInfo is of the form
+ * CTID = ANY (pseudoconstant_array)
+ * where the CTID Var belongs to relation "rel", and nothing on the
+ * other side of the clause does.
+ */
+static bool
+IsTidEqualAnyClause(PlannerInfo *root, RestrictInfo *rinfo, RelOptInfo *rel)
+{
+ ScalarArrayOpExpr *node;
+ Node *arg1,
+ *arg2;
+
+ /* Must be a ScalarArrayOpExpr */
+ if (!(rinfo->clause && IsA(rinfo->clause, ScalarArrayOpExpr)))
+ return false;
+ node = (ScalarArrayOpExpr *) rinfo->clause;
+
+ /* Operator must be tideq */
+ if (node->opno != TIDEqualOperator)
+ return false;
+ if (!node->useOr)
+ return false;
+ Assert(list_length(node->args) == 2);
+ arg1 = linitial(node->args);
+ arg2 = lsecond(node->args);
+
+ /* CTID must be first argument */
+ if (arg1 && IsA(arg1, Var) &&
+ IsCTIDVar((Var *) arg1, rel))
+ {
+ /* The other argument must be a pseudoconstant */
+ if (bms_is_member(rel->relid, pull_varnos(root, arg2)) ||
+ contain_volatile_functions(arg2))
+ return false;
+
+ return true; /* success */
+ }
+
+ return false;
+}
+
+/*
+ * Check to see if a RestrictInfo is a CurrentOfExpr referencing "rel".
+ */
+static bool
+IsCurrentOfClause(RestrictInfo *rinfo, RelOptInfo *rel)
+{
+ CurrentOfExpr *node;
+
+ /* Must be a CurrentOfExpr */
+ if (!(rinfo->clause && IsA(rinfo->clause, CurrentOfExpr)))
+ return false;
+ node = (CurrentOfExpr *) rinfo->clause;
+
+ /* If it references this rel, we're good */
+ if (node->cvarno == rel->relid)
+ return true;
+
+ return false;
+}
+
+/*
+ * Extract a set of CTID conditions from the given RestrictInfo
+ *
+ * Returns a List of CTID qual RestrictInfos for the specified rel (with
+ * implicit OR semantics across the list), or NIL if there are no usable
+ * conditions.
+ *
+ * This function considers only base cases; AND/OR combination is handled
+ * below. Therefore the returned List never has more than one element.
+ * (Using a List may seem a bit weird, but it simplifies the caller.)
+ */
+static List *
+TidQualFromRestrictInfo(PlannerInfo *root, RestrictInfo *rinfo, RelOptInfo *rel)
+{
+ /*
+ * We may ignore pseudoconstant clauses (they can't contain Vars, so could
+ * not match anyway).
+ */
+ if (rinfo->pseudoconstant)
+ return NIL;
+
+ /*
+ * If clause must wait till after some lower-security-level restriction
+ * clause, reject it.
+ */
+ if (!restriction_is_securely_promotable(rinfo, rel))
+ return NIL;
+
+ /*
+ * Check all base cases. If we get a match, return the clause.
+ */
+ if (IsTidEqualClause(rinfo, rel) ||
+ IsTidEqualAnyClause(root, rinfo, rel) ||
+ IsCurrentOfClause(rinfo, rel))
+ return list_make1(rinfo);
+
+ return NIL;
+}
+
+/*
+ * Extract a set of CTID conditions from implicit-AND List of RestrictInfos
+ *
+ * Returns a List of CTID qual RestrictInfos for the specified rel (with
+ * implicit OR semantics across the list), or NIL if there are no usable
+ * equality conditions.
+ *
+ * This function is just concerned with handling AND/OR recursion.
+ */
+static List *
+TidQualFromRestrictInfoList(PlannerInfo *root, List *rlist, RelOptInfo *rel)
+{
+ List *rlst = NIL;
+ ListCell *l;
+
+ foreach(l, rlist)
+ {
+ RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
+
+ if (restriction_is_or_clause(rinfo))
+ {
+ ListCell *j;
+
+ /*
+ * We must be able to extract a CTID condition from every
+ * sub-clause of an OR, or we can't use it.
+ */
+ foreach(j, ((BoolExpr *) rinfo->orclause)->args)
+ {
+ Node *orarg = (Node *) lfirst(j);
+ List *sublist;
+
+ /* OR arguments should be ANDs or sub-RestrictInfos */
+ if (is_andclause(orarg))
+ {
+ List *andargs = ((BoolExpr *) orarg)->args;
+
+ /* Recurse in case there are sub-ORs */
+ sublist = TidQualFromRestrictInfoList(root, andargs, rel);
+ }
+ else
+ {
+ RestrictInfo *rinfo = castNode(RestrictInfo, orarg);
+
+ Assert(!restriction_is_or_clause(rinfo));
+ sublist = TidQualFromRestrictInfo(root, rinfo, rel);
+ }
+
+ /*
+ * If nothing found in this arm, we can't do anything with
+ * this OR clause.
+ */
+ if (sublist == NIL)
+ {
+ rlst = NIL; /* forget anything we had */
+ break; /* out of loop over OR args */
+ }
+
+ /*
+ * OK, continue constructing implicitly-OR'ed result list.
+ */
+ rlst = list_concat(rlst, sublist);
+ }
+ }
+ else
+ {
+ /* Not an OR clause, so handle base cases */
+ rlst = TidQualFromRestrictInfo(root, rinfo, rel);
+ }
+
+ /*
+ * Stop as soon as we find any usable CTID condition. In theory we
+ * could get CTID equality conditions from different AND'ed clauses,
+ * in which case we could try to pick the most efficient one. In
+ * practice, such usage seems very unlikely, so we don't bother; we
+ * just exit as soon as we find the first candidate.
+ */
+ if (rlst)
+ break;
+ }
+
+ return rlst;
+}
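+
+/*
+ * Example (hypothetical qual): for
+ *     WHERE (ctid = '(0,1)' AND f1 = 42) OR ctid = '(0,2)'
+ * each OR arm yields a usable CTID condition, so we return both tideq
+ * clauses as an implicitly-OR'ed list.  Had either arm lacked a CTID
+ * condition, the whole OR clause would have been rejected.
+ */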
+
+/*
+ * Extract a set of CTID range conditions from implicit-AND List of RestrictInfos
+ *
+ * Returns a List of CTID range qual RestrictInfos for the specified rel
+ * (with implicit AND semantics across the list), or NIL if there are no
+ * usable range conditions or if the rel's table AM does not support TID range
+ * scans.
+ */
+static List *
+TidRangeQualFromRestrictInfoList(List *rlist, RelOptInfo *rel)
+{
+ List *rlst = NIL;
+ ListCell *l;
+
+ if ((rel->amflags & AMFLAG_HAS_TID_RANGE) == 0)
+ return NIL;
+
+ foreach(l, rlist)
+ {
+ RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
+
+ if (IsTidRangeClause(rinfo, rel))
+ rlst = lappend(rlst, rinfo);
+ }
+
+ return rlst;
+}
+
+/*
+ * Given a list of join clauses involving our rel, create a parameterized
+ * TidPath for each one that is a suitable TidEqual clause.
+ *
+ * In principle we could combine clauses that reference the same outer rels,
+ * but it doesn't seem like such cases would arise often enough to be worth
+ * troubling over.
+ */
+static void
+BuildParameterizedTidPaths(PlannerInfo *root, RelOptInfo *rel, List *clauses)
+{
+ ListCell *l;
+
+ foreach(l, clauses)
+ {
+ RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
+ List *tidquals;
+ Relids required_outer;
+
+ /*
+ * Validate whether each clause is actually usable; we must check this
+ * even when examining clauses generated from an EquivalenceClass,
+ * since they might not satisfy the restriction on not having Vars of
+ * our rel on the other side, or somebody might've built an operator
+ * class that accepts type "tid" but has other operators in it.
+ *
+ * We currently consider only TidEqual join clauses. In principle we
+ * might find a suitable ScalarArrayOpExpr in the rel's joininfo list,
+ * but it seems unlikely to be worth expending the cycles to check.
+ * And we definitely won't find a CurrentOfExpr here. Hence, we don't
+ * use TidQualFromRestrictInfo; but this must match that function
+ * otherwise.
+ */
+ if (rinfo->pseudoconstant ||
+ !restriction_is_securely_promotable(rinfo, rel) ||
+ !IsTidEqualClause(rinfo, rel))
+ continue;
+
+ /*
+ * Check if clause can be moved to this rel; this is probably
+ * redundant when considering EC-derived clauses, but we must check it
+ * for "loose" join clauses.
+ */
+ if (!join_clause_is_movable_to(rinfo, rel))
+ continue;
+
+ /* OK, make list of clauses for this path */
+ tidquals = list_make1(rinfo);
+
+ /* Compute required outer rels for this path */
+ required_outer = bms_union(rinfo->required_relids, rel->lateral_relids);
+ required_outer = bms_del_member(required_outer, rel->relid);
+
+ add_path(rel, (Path *) create_tidscan_path(root, rel, tidquals,
+ required_outer));
+ }
+}
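+
+/*
+ * For instance, a join qual of the form t1.ctid = t2.ctid (the case noted
+ * in create_tidscan_paths below) yields a TidPath on t1 whose tidquals
+ * list holds just that clause and whose required_outer set contains t2,
+ * i.e. a TID scan that is re-run for each row supplied by t2.
+ */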
+
+/*
+ * Test whether an EquivalenceClass member matches our rel's CTID Var.
+ *
+ * This is a callback for use by generate_implied_equalities_for_column.
+ */
+static bool
+ec_member_matches_ctid(PlannerInfo *root, RelOptInfo *rel,
+ EquivalenceClass *ec, EquivalenceMember *em,
+ void *arg)
+{
+ if (em->em_expr && IsA(em->em_expr, Var) &&
+ IsCTIDVar((Var *) em->em_expr, rel))
+ return true;
+ return false;
+}
+
+/*
+ * create_tidscan_paths
+ * Create paths corresponding to direct TID scans of the given rel.
+ *
+ * Candidate paths are added to the rel's pathlist (using add_path).
+ */
+void
+create_tidscan_paths(PlannerInfo *root, RelOptInfo *rel)
+{
+ List *tidquals;
+ List *tidrangequals;
+
+ /*
+ * If any suitable quals exist in the rel's baserestrict list, generate a
+ * plain (unparameterized) TidPath with them.
+ */
+ tidquals = TidQualFromRestrictInfoList(root, rel->baserestrictinfo, rel);
+
+ if (tidquals != NIL)
+ {
+ /*
+ * This path uses no join clauses, but it could still have required
+ * parameterization due to LATERAL refs in its tlist.
+ */
+ Relids required_outer = rel->lateral_relids;
+
+ add_path(rel, (Path *) create_tidscan_path(root, rel, tidquals,
+ required_outer));
+ }
+
+ /*
+ * If there are range quals in the baserestrict list, generate a
+ * TidRangePath.
+ */
+ tidrangequals = TidRangeQualFromRestrictInfoList(rel->baserestrictinfo,
+ rel);
+
+ if (tidrangequals != NIL)
+ {
+ /*
+ * This path uses no join clauses, but it could still have required
+ * parameterization due to LATERAL refs in its tlist.
+ */
+ Relids required_outer = rel->lateral_relids;
+
+ add_path(rel, (Path *) create_tidrangescan_path(root, rel,
+ tidrangequals,
+ required_outer));
+ }
+
+ /*
+ * Try to generate parameterized TidPaths using equality clauses extracted
+ * from EquivalenceClasses. (This is important since simple "t1.ctid =
+ * t2.ctid" clauses will turn into ECs.)
+ */
+ if (rel->has_eclass_joins)
+ {
+ List *clauses;
+
+ /* Generate clauses, skipping any that join to lateral_referencers */
+ clauses = generate_implied_equalities_for_column(root,
+ rel,
+ ec_member_matches_ctid,
+ NULL,
+ rel->lateral_referencers);
+
+ /* Generate a path for each usable join clause */
+ BuildParameterizedTidPaths(root, rel, clauses);
+ }
+
+ /*
+ * Also consider parameterized TidPaths using "loose" join quals. Quals
+ * of the form "t1.ctid = t2.ctid" would turn into these if they are outer
+ * join quals, for example.
+ */
+ BuildParameterizedTidPaths(root, rel, rel->joininfo);
+}
diff --git a/src/backend/optimizer/plan/Makefile b/src/backend/optimizer/plan/Makefile
new file mode 100644
index 0000000..80ef162
--- /dev/null
+++ b/src/backend/optimizer/plan/Makefile
@@ -0,0 +1,25 @@
+#-------------------------------------------------------------------------
+#
+# Makefile--
+# Makefile for optimizer/plan
+#
+# IDENTIFICATION
+# src/backend/optimizer/plan/Makefile
+#
+#-------------------------------------------------------------------------
+
+subdir = src/backend/optimizer/plan
+top_builddir = ../../../..
+include $(top_builddir)/src/Makefile.global
+
+OBJS = \
+ analyzejoins.o \
+ createplan.o \
+ initsplan.o \
+ planagg.o \
+ planmain.o \
+ planner.o \
+ setrefs.o \
+ subselect.o
+
+include $(top_srcdir)/src/backend/common.mk
diff --git a/src/backend/optimizer/plan/README b/src/backend/optimizer/plan/README
new file mode 100644
index 0000000..013c0f9
--- /dev/null
+++ b/src/backend/optimizer/plan/README
@@ -0,0 +1,158 @@
+src/backend/optimizer/plan/README
+
+Subselects
+==========
+
+Vadim B. Mikheev
+
+
+Date: Fri, 13 Feb 1998 20:50:50 +0700
+From: "Vadim B. Mikheev" <vadim@sable.krasnoyarsk.su>
+To: PostgreSQL Developers List <hackers@postgreSQL.org>
+Subject: [HACKERS] Subselects are in CVS...
+
+These are some implementation notes and open issues...
+
+First, the implementation uses a new type of parameter - PARAM_EXEC - to
+deal with correlation Vars. When query_planner() is called, it first tries
+to replace all upper-query Vars referenced in the current query with Params
+of this type. Some global variables are used to keep the mapping of Vars to
+Params and of Params to Vars.
+
+After this, all of the current query's SubLinks are processed: for each
+SubLink found in the query's qual, union_planner() (the old planner()
+function) is called to plan the corresponding subselect (union_planner()
+calls query_planner() for "simple" queries and supports UNIONs). Once the
+subselect is planned, the optimizer knows whether the query is correlated,
+uncorrelated, or _indirectly_ correlated (it references some grand-parent
+Vars but no parent ones, and so is uncorrelated from the parent's point of
+view).
+
+For uncorrelated and indirectly correlated subqueries of EXPRession or
+EXISTS type, SubLinks are replaced with "normal" clauses from the
+SubLink->Oper list (I changed this list to be a list of EXPR nodes,
+not just Oper ones). The right sides of these nodes are replaced with
+PARAM_EXEC parameters. This is the second use of the new parameter type.
+At run time these parameters get their values from the result of subquery
+evaluation (i.e. from the target list of the subquery). The execution plan
+of the subquery itself becomes an init plan of the parent query. The
+InitPlan knows which parameters are to get their values from the subquery's
+results and will be executed "on demand" (for the query
+select * from table where x > 0 and y > (select max(a) from table_a)
+the subquery will not be executed at all if there are no tuples with
+x > 0 _and_ y is not used in an index scan).
+
+SubLinks for subqueries of all other types are transformed into a new
+type of Expr node - SUBPLAN_EXPR. Expr->args are just the correlation
+variables from the _parent_ query. Expr->oper is the new SubPlan node.
+
+This node is used for InitPlans too. It keeps the subquery's range table,
+the indices of the Params which are to get their values from the _parent_
+query's Vars (i.e. from Expr->args), the indices of the Params into which
+the subquery's results are to be substituted (this is for InitPlans), the
+SubLink, and the subquery's execution plan.
+
+The Plan node was changed to know about dependencies on Params from
+parent queries and InitPlans, to keep a list of changed Params (from the
+above), and so to be re-scanned whenever this list is not NULL. Also, a
+list of InitPlans was added (actually, all of them for the current query
+are in the topmost plan node now), along with a list of the other SubPlans
+(from plan->qual) - to initialize them and let them know about changed
+Params (from the list of their "interests").
+
+After all SubLinks are processed, query_planner() calls the qual
+canonicalizer and does its "normal" work. Thanks to the use of Params,
+the optimizer is mostly unchanged.
+
+Well, the Executor. To get subplans re-evaluated without ExecutorStart()
+and ExecutorEnd() (i.e. without opening and closing relations and indices
+and without many palloc() and pfree() calls - which is what SQL functions
+do on each call), ExecReScan() now supports most Plan types...
+
+Explanation of EXPLAIN.
+
+vac=> explain select * from tmp where x >= (select max(x2) from test2
+where y2 = y and exists (select * from tempx where tx = x));
+NOTICE: QUERY PLAN:
+
+Seq Scan on tmp (cost=40.03 size=101 width=8)
+ SubPlan
+ ^^^^^^^ subquery is in Seq Scan' qual, its plan is below
+ -> Aggregate (cost=2.05 size=0 width=0)
+ InitPlan
+ ^^^^^^^^ EXISTS subsubquery is InitPlan of subquery
+ -> Seq Scan on tempx (cost=4.33 size=1 width=4)
+ -> Result (cost=2.05 size=0 width=0)
+ ^^^^^^ EXISTS subsubquery was transformed into Param
+ and so we have Result node here
+ -> Index Scan on test2 (cost=2.05 size=1 width=4)
+
+
+Open issues.
+
+1. No read-permissions checking (easy, just not done yet).
+2. readfuncs.c can't read subplans (easy, not critical, because we
+   currently don't use the ASCII representation of execution plans
+   anywhere).
+3. ExecReScan() doesn't support all plan types. At least support for
+   MergeJoin has to be implemented.
+4. Memory leaks in ExecReScan().
+5. I need advice: if a subquery introduced with NOT IN doesn't return
+   any tuples, then the qualification fails, yes?
+6. Regression tests !!!!!!!!!!!!!!!!!!!!
+   (Could we use data/queries from MySQL's crash.me?
+   Is it copyrighted? Could they give us the rights?)
+7. Performance.
+   - Should be good when the subquery is transformed into an InitPlan.
+   - Something should be done for uncorrelated subqueries introduced
+     with ANY/ALL - keep thinking. Currently, the subplan will be
+     re-scanned for each parent tuple - very slow...
+
+Results of some tests. TMP is a table with x, y (both int4), x in 0-9,
+y = 100 - x, 1000 tuples (10 duplicates of each tuple). TEST2 is a table
+with x2, y2 (both int4), x2 in 1-99, y2 = 100 - x2, 10000 tuples (100 dups).
+
+ Trying
+
+select * from tmp where x >= (select max(x2) from test2 where y2 = y);
+
+ and
+
+begin;
+select y as ty, max(x2) as mx into table tsub from test2, tmp
+where y2 = y group by ty;
+vacuum tsub;
+select x, y from tmp, tsub where x >= mx and y = ty;
+drop table tsub;
+end;
+
+ Without index on test2(y2):
+
+SubSelect -> 320 sec
+Using temp table -> 32 sec
+
+ Having index
+
+SubSelect -> 17 sec (2M of memory)
+Using temp table -> 32 sec (12M of memory: -S 8192)
+
+Vadim
diff --git a/src/backend/optimizer/plan/analyzejoins.c b/src/backend/optimizer/plan/analyzejoins.c
new file mode 100644
index 0000000..34efeee
--- /dev/null
+++ b/src/backend/optimizer/plan/analyzejoins.c
@@ -0,0 +1,1127 @@
+/*-------------------------------------------------------------------------
+ *
+ * analyzejoins.c
+ * Routines for simplifying joins after initial query analysis
+ *
+ * While we do a great deal of join simplification in prep/prepjointree.c,
+ * certain optimizations cannot be performed at that stage for lack of
+ * detailed information about the query. The routines here are invoked
+ * after initsplan.c has done its work, and can do additional join removal
+ * and simplification steps based on the information extracted. The penalty
+ * is that we have to work harder to clean up after ourselves when we modify
+ * the query, since the derived data structures have to be updated too.
+ *
+ * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ * src/backend/optimizer/plan/analyzejoins.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "nodes/nodeFuncs.h"
+#include "optimizer/clauses.h"
+#include "optimizer/joininfo.h"
+#include "optimizer/optimizer.h"
+#include "optimizer/pathnode.h"
+#include "optimizer/paths.h"
+#include "optimizer/planmain.h"
+#include "optimizer/tlist.h"
+#include "utils/lsyscache.h"
+
+/* local functions */
+static bool join_is_removable(PlannerInfo *root, SpecialJoinInfo *sjinfo);
+static void remove_rel_from_query(PlannerInfo *root, int relid,
+ Relids joinrelids);
+static List *remove_rel_from_joinlist(List *joinlist, int relid, int *nremoved);
+static bool rel_supports_distinctness(PlannerInfo *root, RelOptInfo *rel);
+static bool rel_is_distinct_for(PlannerInfo *root, RelOptInfo *rel,
+ List *clause_list);
+static Oid distinct_col_search(int colno, List *colnos, List *opids);
+static bool is_innerrel_unique_for(PlannerInfo *root,
+ Relids joinrelids,
+ Relids outerrelids,
+ RelOptInfo *innerrel,
+ JoinType jointype,
+ List *restrictlist);
+
+
+/*
+ * remove_useless_joins
+ * Check for relations that don't actually need to be joined at all,
+ * and remove them from the query.
+ *
+ * We are passed the current joinlist and return the updated list. Other
+ * data structures that have to be updated are accessible via "root".
+ */
+List *
+remove_useless_joins(PlannerInfo *root, List *joinlist)
+{
+ ListCell *lc;
+
+ /*
+ * We are only interested in relations that are left-joined to, so we can
+ * scan the join_info_list to find them easily.
+ */
+restart:
+ foreach(lc, root->join_info_list)
+ {
+ SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) lfirst(lc);
+ int innerrelid;
+ int nremoved;
+
+ /* Skip if not removable */
+ if (!join_is_removable(root, sjinfo))
+ continue;
+
+ /*
+ * Currently, join_is_removable can only succeed when the sjinfo's
+ * righthand is a single baserel. Remove that rel from the query and
+ * joinlist.
+ */
+ innerrelid = bms_singleton_member(sjinfo->min_righthand);
+
+ remove_rel_from_query(root, innerrelid,
+ bms_union(sjinfo->min_lefthand,
+ sjinfo->min_righthand));
+
+ /* We verify that exactly one reference gets removed from joinlist */
+ nremoved = 0;
+ joinlist = remove_rel_from_joinlist(joinlist, innerrelid, &nremoved);
+ if (nremoved != 1)
+ elog(ERROR, "failed to find relation %d in joinlist", innerrelid);
+
+ /*
+ * We can delete this SpecialJoinInfo from the list too, since it's no
+ * longer of interest. (Since we'll restart the foreach loop
+ * immediately, we don't bother with foreach_delete_current.)
+ */
+ root->join_info_list = list_delete_cell(root->join_info_list, lc);
+
+ /*
+ * Restart the scan. This is necessary to ensure we find all
+ * removable joins independently of ordering of the join_info_list
+ * (note that removal of attr_needed bits may make a join appear
+ * removable that did not before).
+ */
+ goto restart;
+ }
+
+ return joinlist;
+}
+
+/*
+ * clause_sides_match_join
+ * Determine whether a join clause is of the right form to use in this join.
+ *
+ * We already know that the clause is a binary opclause referencing only the
+ * rels in the current join. The point here is to check whether it has the
+ * form "outerrel_expr op innerrel_expr" or "innerrel_expr op outerrel_expr",
+ * rather than mixing outer and inner vars on either side. If it matches,
+ * we set the transient flag outer_is_left to identify which side is which.
+ */
+static inline bool
+clause_sides_match_join(RestrictInfo *rinfo, Relids outerrelids,
+ Relids innerrelids)
+{
+ if (bms_is_subset(rinfo->left_relids, outerrelids) &&
+ bms_is_subset(rinfo->right_relids, innerrelids))
+ {
+ /* lefthand side is outer */
+ rinfo->outer_is_left = true;
+ return true;
+ }
+ else if (bms_is_subset(rinfo->left_relids, innerrelids) &&
+ bms_is_subset(rinfo->right_relids, outerrelids))
+ {
+ /* righthand side is outer */
+ rinfo->outer_is_left = false;
+ return true;
+ }
+ return false; /* no good for these input relations */
+}
+
+/*
+ * join_is_removable
+ * Check whether we need not perform this special join at all, because
+ * it will just duplicate its left input.
+ *
+ * This is true for a left join for which the join condition cannot match
+ * more than one inner-side row. (There are other possibly interesting
+ * cases, but we don't have the infrastructure to prove them.) We also
+ * have to check that the inner side doesn't generate any variables needed
+ * above the join.
+ */
+static bool
+join_is_removable(PlannerInfo *root, SpecialJoinInfo *sjinfo)
+{
+ int innerrelid;
+ RelOptInfo *innerrel;
+ Relids joinrelids;
+ List *clause_list = NIL;
+ ListCell *l;
+ int attroff;
+
+ /*
+ * Must be a non-delaying left join to a single baserel, else we aren't
+ * going to be able to do anything with it.
+ */
+ if (sjinfo->jointype != JOIN_LEFT ||
+ sjinfo->delay_upper_joins)
+ return false;
+
+ if (!bms_get_singleton_member(sjinfo->min_righthand, &innerrelid))
+ return false;
+
+ /*
+ * Never try to eliminate a left join to the query result rel. Although
+ * the case is syntactically impossible in standard SQL, MERGE will build
+ * a join tree that looks exactly like that.
+ */
+ if (innerrelid == root->parse->resultRelation)
+ return false;
+
+ innerrel = find_base_rel(root, innerrelid);
+
+ /*
+ * Before we go to the effort of checking whether any innerrel variables
+ * are needed above the join, make a quick check to eliminate cases in
+ * which we will surely be unable to prove uniqueness of the innerrel.
+ */
+ if (!rel_supports_distinctness(root, innerrel))
+ return false;
+
+ /* Compute the relid set for the join we are considering */
+ joinrelids = bms_union(sjinfo->min_lefthand, sjinfo->min_righthand);
+
+ /*
+ * We can't remove the join if any inner-rel attributes are used above the
+ * join.
+ *
+ * Note that this test only detects use of inner-rel attributes in higher
+ * join conditions and the target list. There might be such attributes in
+ * pushed-down conditions at this join, too. We check that case below.
+ *
+ * As a micro-optimization, it seems better to start with max_attr and
+ * count down rather than starting with min_attr and counting up, on the
+ * theory that the system attributes are somewhat less likely to be wanted
+ * and should be tested last.
+ */
+ for (attroff = innerrel->max_attr - innerrel->min_attr;
+ attroff >= 0;
+ attroff--)
+ {
+ if (!bms_is_subset(innerrel->attr_needed[attroff], joinrelids))
+ return false;
+ }
+
+ /*
+ * Similarly check that the inner rel isn't needed by any PlaceHolderVars
+ * that will be used above the join. We only need to fail if such a PHV
+ * actually references some inner-rel attributes; but the correct check
+ * for that is relatively expensive, so we first check against ph_eval_at,
+ * which must mention the inner rel if the PHV uses any inner-rel attrs as
+ * non-lateral references. Note that if the PHV's syntactic scope is just
+ * the inner rel, we can't drop the rel even if the PHV is variable-free.
+ */
+ foreach(l, root->placeholder_list)
+ {
+ PlaceHolderInfo *phinfo = (PlaceHolderInfo *) lfirst(l);
+
+ if (bms_overlap(phinfo->ph_lateral, innerrel->relids))
+ return false; /* it references innerrel laterally */
+ if (bms_is_subset(phinfo->ph_needed, joinrelids))
+ continue; /* PHV is not used above the join */
+ if (!bms_overlap(phinfo->ph_eval_at, innerrel->relids))
+ continue; /* it definitely doesn't reference innerrel */
+ if (bms_is_subset(phinfo->ph_eval_at, innerrel->relids))
+ return false; /* there isn't any other place to eval PHV */
+ if (bms_overlap(pull_varnos(root, (Node *) phinfo->ph_var->phexpr),
+ innerrel->relids))
+ return false; /* it does reference innerrel */
+ }
+
+ /*
+ * Search for mergejoinable clauses that constrain the inner rel against
+ * either the outer rel or a pseudoconstant. If an operator is
+ * mergejoinable then it behaves like equality for some btree opclass, so
+ * it's what we want. The mergejoinability test also eliminates clauses
+ * containing volatile functions, which we couldn't depend on.
+ */
+ foreach(l, innerrel->joininfo)
+ {
+ RestrictInfo *restrictinfo = (RestrictInfo *) lfirst(l);
+
+ /*
+ * If it's not a join clause for this outer join, we can't use it.
+ * Note that if the clause is pushed-down, then it is logically from
+ * above the outer join, even if it references no other rels (it might
+ * be from WHERE, for example).
+ */
+ if (RINFO_IS_PUSHED_DOWN(restrictinfo, joinrelids))
+ {
+ /*
+ * If such a clause actually references the inner rel then join
+ * removal has to be disallowed. We have to check this despite
+ * the previous attr_needed checks because of the possibility of
+ * pushed-down clauses referencing the rel.
+ */
+ if (bms_is_member(innerrelid, restrictinfo->clause_relids))
+ return false;
+ continue; /* else, ignore; not useful here */
+ }
+
+ /* Ignore if it's not a mergejoinable clause */
+ if (!restrictinfo->can_join ||
+ restrictinfo->mergeopfamilies == NIL)
+ continue; /* not mergejoinable */
+
+ /*
+ * Check if clause has the form "outer op inner" or "inner op outer",
+ * and if so mark which side is inner.
+ */
+ if (!clause_sides_match_join(restrictinfo, sjinfo->min_lefthand,
+ innerrel->relids))
+ continue; /* no good for these input relations */
+
+ /* OK, add to list */
+ clause_list = lappend(clause_list, restrictinfo);
+ }
+
+ /*
+ * Now that we have the relevant equality join clauses, try to prove the
+ * innerrel distinct.
+ */
+ if (rel_is_distinct_for(root, innerrel, clause_list))
+ return true;
+
+ /*
+ * Some day it would be nice to check for other methods of establishing
+ * distinctness.
+ */
+ return false;
+}
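+
+/*
+ * Hypothetical example of a removable join:
+ *
+ *		select a.* from a left join b on a.b_id = b.id;
+ *
+ * If b.id has a unique index and no attribute of b is needed above the
+ * join, the clause a.b_id = b.id can match at most one b row for any a
+ * row, so the left join cannot affect a's output and b can be removed.
+ */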
+
+
+/*
+ * Remove the target relid from the planner's data structures, having
+ * determined that there is no need to include it in the query.
+ *
+ * We are not terribly thorough here. We must make sure that the rel is
+ * no longer treated as a baserel, and that attributes of other baserels
+ * are no longer marked as being needed at joins involving this rel.
+ * Also, join quals involving the rel have to be removed from the joininfo
+ * lists, but only if they belong to the outer join identified by joinrelids.
+ */
+static void
+remove_rel_from_query(PlannerInfo *root, int relid, Relids joinrelids)
+{
+ RelOptInfo *rel = find_base_rel(root, relid);
+ List *joininfos;
+ Index rti;
+ ListCell *l;
+
+ /*
+ * Mark the rel as "dead" to show it is no longer part of the join tree.
+ * (Removing it from the baserel array altogether seems too risky.)
+ */
+ rel->reloptkind = RELOPT_DEADREL;
+
+ /*
+ * Remove references to the rel from other baserels' attr_needed arrays.
+ */
+ for (rti = 1; rti < root->simple_rel_array_size; rti++)
+ {
+ RelOptInfo *otherrel = root->simple_rel_array[rti];
+ int attroff;
+
+ /* there may be empty slots corresponding to non-baserel RTEs */
+ if (otherrel == NULL)
+ continue;
+
+ Assert(otherrel->relid == rti); /* sanity check on array */
+
+ /* no point in processing target rel itself */
+ if (otherrel == rel)
+ continue;
+
+ for (attroff = otherrel->max_attr - otherrel->min_attr;
+ attroff >= 0;
+ attroff--)
+ {
+ otherrel->attr_needed[attroff] =
+ bms_del_member(otherrel->attr_needed[attroff], relid);
+ }
+ }
+
+ /*
+ * Likewise remove references from SpecialJoinInfo data structures.
+ *
+ * This is relevant in case the outer join we're deleting is nested inside
+ * other outer joins: the upper joins' relid sets have to be adjusted. The
+ * RHS of the target outer join will be made empty here, but that's OK
+ * since caller will delete that SpecialJoinInfo entirely.
+ */
+ foreach(l, root->join_info_list)
+ {
+ SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) lfirst(l);
+
+ sjinfo->min_lefthand = bms_del_member(sjinfo->min_lefthand, relid);
+ sjinfo->min_righthand = bms_del_member(sjinfo->min_righthand, relid);
+ sjinfo->syn_lefthand = bms_del_member(sjinfo->syn_lefthand, relid);
+ sjinfo->syn_righthand = bms_del_member(sjinfo->syn_righthand, relid);
+ }
+
+ /*
+ * Likewise remove references from PlaceHolderVar data structures,
+ * removing any no-longer-needed placeholders entirely.
+ *
+ * Removal is a bit trickier than it might seem: we can remove PHVs that
+ * are used at the target rel and/or in the join qual, but not those that
+ * are used at join partner rels or above the join. It's not that easy to
+ * distinguish PHVs used at partner rels from those used in the join qual,
+ * since they will both have ph_needed sets that are subsets of
+ * joinrelids. However, a PHV used at a partner rel could not have the
+ * target rel in ph_eval_at, so we check that while deciding whether to
+ * remove or just update the PHV. There is no corresponding test in
+ * join_is_removable because it doesn't need to distinguish those cases.
+ */
+ foreach(l, root->placeholder_list)
+ {
+ PlaceHolderInfo *phinfo = (PlaceHolderInfo *) lfirst(l);
+
+ Assert(!bms_is_member(relid, phinfo->ph_lateral));
+ if (bms_is_subset(phinfo->ph_needed, joinrelids) &&
+ bms_is_member(relid, phinfo->ph_eval_at))
+ root->placeholder_list = foreach_delete_current(root->placeholder_list,
+ l);
+ else
+ {
+ phinfo->ph_eval_at = bms_del_member(phinfo->ph_eval_at, relid);
+ Assert(!bms_is_empty(phinfo->ph_eval_at));
+ phinfo->ph_needed = bms_del_member(phinfo->ph_needed, relid);
+ }
+ }
+
+ /*
+ * Remove any joinquals referencing the rel from the joininfo lists.
+ *
+ * In some cases, a joinqual has to be put back after deleting its
+ * reference to the target rel. This can occur for pseudoconstant and
+ * outerjoin-delayed quals, which can get marked as requiring the rel in
+ * order to force them to be evaluated at or above the join. We can't
+ * just discard them, though. Only quals that logically belonged to the
+ * outer join being discarded should be removed from the query.
+ *
+ * We must make a copy of the rel's old joininfo list before starting the
+ * loop, because otherwise remove_join_clause_from_rels would destroy the
+ * list while we're scanning it.
+ */
+ joininfos = list_copy(rel->joininfo);
+ foreach(l, joininfos)
+ {
+ RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
+
+ remove_join_clause_from_rels(root, rinfo, rinfo->required_relids);
+
+ if (RINFO_IS_PUSHED_DOWN(rinfo, joinrelids))
+ {
+ /* Recheck that qual doesn't actually reference the target rel */
+ Assert(!bms_is_member(relid, rinfo->clause_relids));
+
+ /*
+ * The required_relids probably aren't shared with anything else,
+ * but let's copy them just to be sure.
+ */
+ rinfo->required_relids = bms_copy(rinfo->required_relids);
+ rinfo->required_relids = bms_del_member(rinfo->required_relids,
+ relid);
+ distribute_restrictinfo_to_rels(root, rinfo);
+ }
+ }
+
+ /*
+ * There may be references to the rel in root->fkey_list, but if so,
+ * match_foreign_keys_to_quals() will get rid of them.
+ */
+}
+
+/*
+ * Remove any occurrences of the target relid from a joinlist structure.
+ *
+ * It's easiest to build a whole new list structure, so we handle it that
+ * way. Efficiency is not a big deal here.
+ *
+ * *nremoved is incremented by the number of occurrences removed (there
+ * should be exactly one, but the caller checks that).
+ */
+static List *
+remove_rel_from_joinlist(List *joinlist, int relid, int *nremoved)
+{
+ List *result = NIL;
+ ListCell *jl;
+
+ foreach(jl, joinlist)
+ {
+ Node *jlnode = (Node *) lfirst(jl);
+
+ if (IsA(jlnode, RangeTblRef))
+ {
+ int varno = ((RangeTblRef *) jlnode)->rtindex;
+
+ if (varno == relid)
+ (*nremoved)++;
+ else
+ result = lappend(result, jlnode);
+ }
+ else if (IsA(jlnode, List))
+ {
+ /* Recurse to handle subproblem */
+ List *sublist;
+
+ sublist = remove_rel_from_joinlist((List *) jlnode,
+ relid, nremoved);
+ /* Avoid including empty sub-lists in the result */
+ if (sublist)
+ result = lappend(result, sublist);
+ }
+ else
+ {
+ elog(ERROR, "unrecognized joinlist node type: %d",
+ (int) nodeTag(jlnode));
+ }
+ }
+
+ return result;
+}
+
+
+/*
+ * reduce_unique_semijoins
+ * Check for semijoins that can be simplified to plain inner joins
+ * because the inner relation is provably unique for the join clauses.
+ *
+ * Ideally this would happen during reduce_outer_joins, but we don't have
+ * enough information at that point.
+ *
+ * To perform the strength reduction when applicable, we need only delete
+ * the semijoin's SpecialJoinInfo from root->join_info_list. (We don't
+ * bother fixing the join type attributed to it in the query jointree,
+ * since that won't be consulted again.)
+ */
+void
+reduce_unique_semijoins(PlannerInfo *root)
+{
+ ListCell *lc;
+
+ /*
+ * Scan the join_info_list to find semijoins.
+ */
+ foreach(lc, root->join_info_list)
+ {
+ SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) lfirst(lc);
+ int innerrelid;
+ RelOptInfo *innerrel;
+ Relids joinrelids;
+ List *restrictlist;
+
+ /*
+ * Must be a non-delaying semijoin to a single baserel, else we aren't
+ * going to be able to do anything with it. (It's probably not
+ * possible for delay_upper_joins to be set on a semijoin, but we
+ * might as well check.)
+ */
+ if (sjinfo->jointype != JOIN_SEMI ||
+ sjinfo->delay_upper_joins)
+ continue;
+
+ if (!bms_get_singleton_member(sjinfo->min_righthand, &innerrelid))
+ continue;
+
+ innerrel = find_base_rel(root, innerrelid);
+
+ /*
+ * Before we trouble to run generate_join_implied_equalities, make a
+ * quick check to eliminate cases in which we will surely be unable to
+ * prove uniqueness of the innerrel.
+ */
+ if (!rel_supports_distinctness(root, innerrel))
+ continue;
+
+ /* Compute the relid set for the join we are considering */
+ joinrelids = bms_union(sjinfo->min_lefthand, sjinfo->min_righthand);
+
+ /*
+ * Since we're only considering a single-rel RHS, any join clauses it
+ * has must be clauses linking it to the semijoin's min_lefthand. We
+ * can also consider EC-derived join clauses.
+ */
+ restrictlist =
+ list_concat(generate_join_implied_equalities(root,
+ joinrelids,
+ sjinfo->min_lefthand,
+ innerrel),
+ innerrel->joininfo);
+
+ /* Test whether the innerrel is unique for those clauses. */
+ if (!innerrel_is_unique(root,
+ joinrelids, sjinfo->min_lefthand, innerrel,
+ JOIN_SEMI, restrictlist, true))
+ continue;
+
+ /* OK, remove the SpecialJoinInfo from the list. */
+ root->join_info_list = foreach_delete_current(root->join_info_list, lc);
+ }
+}
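+
+/*
+ * For illustration (editorial note; the table and column names are
+ * hypothetical): given
+ *		SELECT * FROM a WHERE a.x IN (SELECT b.pkey FROM b);
+ * where b.pkey has a unique index, the semijoin from a to b is provably
+ * unique on the join clause a.x = b.pkey, so the SpecialJoinInfo is
+ * deleted above and the join can be planned as a plain inner join.
+ */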
+
+
+/*
+ * rel_supports_distinctness
+ * Could the relation possibly be proven distinct on some set of columns?
+ *
+ * This is effectively a pre-checking function for rel_is_distinct_for().
+ * It must return true if rel_is_distinct_for() could possibly return true
+ * with this rel, but it should not expend a lot of cycles. The idea is
+ * that callers can avoid doing possibly-expensive processing to compute
+ * rel_is_distinct_for()'s argument lists if the call could not possibly
+ * succeed.
+ */
+static bool
+rel_supports_distinctness(PlannerInfo *root, RelOptInfo *rel)
+{
+ /* We only know about baserels ... */
+ if (rel->reloptkind != RELOPT_BASEREL)
+ return false;
+ if (rel->rtekind == RTE_RELATION)
+ {
+ /*
+ * For a plain relation, we only know how to prove uniqueness by
+ * reference to unique indexes. Make sure there's at least one
+ * suitable unique index. It must be immediately enforced, and not a
+ * partial index. (Keep these conditions in sync with
+ * relation_has_unique_index_for!)
+ */
+ ListCell *lc;
+
+ foreach(lc, rel->indexlist)
+ {
+ IndexOptInfo *ind = (IndexOptInfo *) lfirst(lc);
+
+ if (ind->unique && ind->immediate && ind->indpred == NIL)
+ return true;
+ }
+ }
+ else if (rel->rtekind == RTE_SUBQUERY)
+ {
+ Query *subquery = root->simple_rte_array[rel->relid]->subquery;
+
+ /* Check if the subquery has any qualities that support distinctness */
+ if (query_supports_distinctness(subquery))
+ return true;
+ }
+ /* We have no proof rules for any other rtekinds. */
+ return false;
+}
+
+/*
+ * rel_is_distinct_for
+ * Does the relation return only distinct rows according to clause_list?
+ *
+ * clause_list is a list of join restriction clauses involving this rel and
+ * some other one. Return true if no two rows emitted by this rel could
+ * possibly join to the same row of the other rel.
+ *
+ * The caller must have already determined that each condition is a
+ * mergejoinable equality with an expression in this relation on one side, and
+ * an expression not involving this relation on the other. The transient
+ * outer_is_left flag is used to identify which side references this relation:
+ * left side if outer_is_left is false, right side if it is true.
+ *
+ * Note that the passed-in clause_list may be destructively modified! This
+ * is OK for current uses, because the clause_list is built by the caller for
+ * the sole purpose of passing to this function.
+ */
+static bool
+rel_is_distinct_for(PlannerInfo *root, RelOptInfo *rel, List *clause_list)
+{
+ /*
+ * We could skip a couple of tests here if we assume all callers checked
+ * rel_supports_distinctness first, but it doesn't seem worth taking any
+ * risk for.
+ */
+ if (rel->reloptkind != RELOPT_BASEREL)
+ return false;
+ if (rel->rtekind == RTE_RELATION)
+ {
+ /*
+ * Examine the indexes to see if we have a matching unique index.
+ * relation_has_unique_index_for automatically adds any usable
+ * restriction clauses for the rel, so we needn't do that here.
+ */
+ if (relation_has_unique_index_for(root, rel, clause_list, NIL, NIL))
+ return true;
+ }
+ else if (rel->rtekind == RTE_SUBQUERY)
+ {
+ Index relid = rel->relid;
+ Query *subquery = root->simple_rte_array[relid]->subquery;
+ List *colnos = NIL;
+ List *opids = NIL;
+ ListCell *l;
+
+ /*
+ * Build the argument lists for query_is_distinct_for: a list of
+ * output column numbers that the query needs to be distinct over, and
+ * a list of equality operators that the output columns need to be
+ * distinct according to.
+ *
+ * (XXX we are not considering restriction clauses attached to the
+ * subquery; is that worth doing?)
+ */
+ foreach(l, clause_list)
+ {
+ RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
+ Oid op;
+ Var *var;
+
+ /*
+ * Get the equality operator we need uniqueness according to.
+ * (This might be a cross-type operator and thus not exactly the
+ * same operator the subquery would consider; that's all right
+ * since query_is_distinct_for can resolve such cases.) The
+ * caller's mergejoinability test should have selected only
+ * OpExprs.
+ */
+ op = castNode(OpExpr, rinfo->clause)->opno;
+
+ /* caller identified the inner side for us */
+ if (rinfo->outer_is_left)
+ var = (Var *) get_rightop(rinfo->clause);
+ else
+ var = (Var *) get_leftop(rinfo->clause);
+
+ /*
+ * We may ignore any RelabelType node above the operand. (There
+ * won't be more than one, since eval_const_expressions() has been
+ * applied already.)
+ */
+ if (var && IsA(var, RelabelType))
+ var = (Var *) ((RelabelType *) var)->arg;
+
+ /*
+ * If inner side isn't a Var referencing a subquery output column,
+ * this clause doesn't help us.
+ */
+ if (!var || !IsA(var, Var) ||
+ var->varno != relid || var->varlevelsup != 0)
+ continue;
+
+ colnos = lappend_int(colnos, var->varattno);
+ opids = lappend_oid(opids, op);
+ }
+
+ if (query_is_distinct_for(subquery, colnos, opids))
+ return true;
+ }
+ return false;
+}
+
+
+/*
+ * query_supports_distinctness - could the query possibly be proven distinct
+ * on some set of output columns?
+ *
+ * This is effectively a pre-checking function for query_is_distinct_for().
+ * It must return true if query_is_distinct_for() could possibly return true
+ * with this query, but it should not expend a lot of cycles. The idea is
+ * that callers can avoid doing possibly-expensive processing to compute
+ * query_is_distinct_for()'s argument lists if the call could not possibly
+ * succeed.
+ */
+bool
+query_supports_distinctness(Query *query)
+{
+ /* SRFs break distinctness except with DISTINCT, see below */
+ if (query->hasTargetSRFs && query->distinctClause == NIL)
+ return false;
+
+ /* check for features we can prove distinctness with */
+ if (query->distinctClause != NIL ||
+ query->groupClause != NIL ||
+ query->groupingSets != NIL ||
+ query->hasAggs ||
+ query->havingQual ||
+ query->setOperations)
+ return true;
+
+ return false;
+}
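+
+/*
+ * For illustration (editorial note; the queries are hypothetical): a
+ * subquery such as "SELECT DISTINCT x FROM t" or "SELECT x, count(*)
+ * FROM t GROUP BY x" passes this check, while a bare "SELECT x FROM t"
+ * does not, since nothing about the latter prevents duplicate x values.
+ */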
+
+/*
+ * query_is_distinct_for - does query never return duplicates of the
+ * specified columns?
+ *
+ * query is a not-yet-planned subquery (in current usage, it's always from
+ * a subquery RTE, which the planner avoids scribbling on).
+ *
+ * colnos is an integer list of output column numbers (resno's). We are
+ * interested in whether rows consisting of just these columns are certain
+ * to be distinct. "Distinctness" is defined according to whether the
+ * corresponding upper-level equality operators listed in opids would think
+ * the values are distinct. (Note: the opids entries could be cross-type
+ * operators, and thus not exactly the equality operators that the subquery
+ * would use itself. We use equality_ops_are_compatible() to check
+ * compatibility. That looks at btree or hash opfamily membership, and so
+ * should give trustworthy answers for all operators that we might need
+ * to deal with here.)
+ */
+bool
+query_is_distinct_for(Query *query, List *colnos, List *opids)
+{
+ ListCell *l;
+ Oid opid;
+
+ Assert(list_length(colnos) == list_length(opids));
+
+ /*
+ * DISTINCT (including DISTINCT ON) guarantees uniqueness if all the
+ * columns in the DISTINCT clause appear in colnos and operator semantics
+ * match. This is true even if there are SRFs in the DISTINCT columns or
+ * elsewhere in the tlist.
+ */
+ if (query->distinctClause)
+ {
+ foreach(l, query->distinctClause)
+ {
+ SortGroupClause *sgc = (SortGroupClause *) lfirst(l);
+ TargetEntry *tle = get_sortgroupclause_tle(sgc,
+ query->targetList);
+
+ opid = distinct_col_search(tle->resno, colnos, opids);
+ if (!OidIsValid(opid) ||
+ !equality_ops_are_compatible(opid, sgc->eqop))
+ break; /* exit early if no match */
+ }
+ if (l == NULL) /* had matches for all? */
+ return true;
+ }
+
+ /*
+ * Otherwise, a set-returning function in the query's targetlist can
+ * result in returning duplicate rows, despite any grouping that might
+ * occur before tlist evaluation. (If all tlist SRFs are within GROUP BY
+ * columns, it would be safe because they'd be expanded before grouping.
+ * But it doesn't currently seem worth the effort to check for that.)
+ */
+ if (query->hasTargetSRFs)
+ return false;
+
+ /*
+ * Similarly, GROUP BY without GROUPING SETS guarantees uniqueness if all
+ * the grouped columns appear in colnos and operator semantics match.
+ */
+ if (query->groupClause && !query->groupingSets)
+ {
+ foreach(l, query->groupClause)
+ {
+ SortGroupClause *sgc = (SortGroupClause *) lfirst(l);
+ TargetEntry *tle = get_sortgroupclause_tle(sgc,
+ query->targetList);
+
+ opid = distinct_col_search(tle->resno, colnos, opids);
+ if (!OidIsValid(opid) ||
+ !equality_ops_are_compatible(opid, sgc->eqop))
+ break; /* exit early if no match */
+ }
+ if (l == NULL) /* had matches for all? */
+ return true;
+ }
+ else if (query->groupingSets)
+ {
+ /*
+ * If we have grouping sets with expressions, we probably don't have
+ * uniqueness and analysis would be hard. Punt.
+ */
+ if (query->groupClause)
+ return false;
+
+ /*
+ * If we have no groupClause (therefore no grouping expressions), we
+ * might have one or many empty grouping sets. If there's just one,
+ * then we're returning only one row and are certainly unique. But
+ * otherwise, we know we're certainly not unique.
+ */
+ if (list_length(query->groupingSets) == 1 &&
+ ((GroupingSet *) linitial(query->groupingSets))->kind == GROUPING_SET_EMPTY)
+ return true;
+ else
+ return false;
+ }
+ else
+ {
+ /*
+ * If we have no GROUP BY, but do have aggregates or HAVING, then the
+ * result is at most one row so it's surely unique, for any operators.
+ */
+ if (query->hasAggs || query->havingQual)
+ return true;
+ }
+
+ /*
+ * UNION, INTERSECT, EXCEPT guarantee uniqueness of the whole output row,
+ * except with ALL.
+ */
+ if (query->setOperations)
+ {
+ SetOperationStmt *topop = castNode(SetOperationStmt, query->setOperations);
+
+ Assert(topop->op != SETOP_NONE);
+
+ if (!topop->all)
+ {
+ ListCell *lg;
+
+ /* We're good if all the nonjunk output columns are in colnos */
+ lg = list_head(topop->groupClauses);
+ foreach(l, query->targetList)
+ {
+ TargetEntry *tle = (TargetEntry *) lfirst(l);
+ SortGroupClause *sgc;
+
+ if (tle->resjunk)
+ continue; /* ignore resjunk columns */
+
+ /* non-resjunk columns should have grouping clauses */
+ Assert(lg != NULL);
+ sgc = (SortGroupClause *) lfirst(lg);
+ lg = lnext(topop->groupClauses, lg);
+
+ opid = distinct_col_search(tle->resno, colnos, opids);
+ if (!OidIsValid(opid) ||
+ !equality_ops_are_compatible(opid, sgc->eqop))
+ break; /* exit early if no match */
+ }
+ if (l == NULL) /* had matches for all? */
+ return true;
+ }
+ }
+
+ /*
+ * XXX Are there any other cases in which we can easily see the result
+ * must be distinct?
+ *
+ * If you do add more smarts to this function, be sure to update
+ * query_supports_distinctness() to match.
+ */
+
+ return false;
+}
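+
+/*
+ * For illustration (editorial note; the query is hypothetical): for
+ * "SELECT DISTINCT a, b FROM t", query_is_distinct_for returns true when
+ * colnos covers both output columns 1 and 2 (with compatible equality
+ * operators), but false for colnos = (1) alone, since rows may repeat an
+ * "a" value as long as "b" differs.
+ */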
+
+/*
+ * distinct_col_search - subroutine for query_is_distinct_for
+ *
+ * If colno is in colnos, return the corresponding element of opids,
+ * else return InvalidOid. (Ordinarily colnos would not contain duplicates,
+ * but if it does, we arbitrarily select the first match.)
+ */
+static Oid
+distinct_col_search(int colno, List *colnos, List *opids)
+{
+ ListCell *lc1,
+ *lc2;
+
+ forboth(lc1, colnos, lc2, opids)
+ {
+ if (colno == lfirst_int(lc1))
+ return lfirst_oid(lc2);
+ }
+ return InvalidOid;
+}
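+
+#ifdef NOT_USED
+/*
+ * Editorial sketch, not part of the original file: minimal use of
+ * distinct_col_search().  The column numbers and operator OIDs below are
+ * hypothetical placeholder values.
+ */
+static void
+distinct_col_search_example(void)
+{
+	List	   *colnos = list_make2_int(1, 3);
+	List	   *opids = list_make2_oid((Oid) 1001, (Oid) 1002);
+
+	/* Column 3 is present, so its paired operator comes back */
+	Assert(distinct_col_search(3, colnos, opids) == (Oid) 1002);
+
+	/* Column 2 is absent, so the result is InvalidOid */
+	Assert(distinct_col_search(2, colnos, opids) == InvalidOid);
+}
+#endif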
+
+
+/*
+ * innerrel_is_unique
+ * Check if the innerrel provably contains at most one tuple matching any
+ * tuple from the outerrel, based on join clauses in the 'restrictlist'.
+ *
+ * We need an actual RelOptInfo for the innerrel, but it's sufficient to
+ * identify the outerrel by its Relids. This asymmetry supports use of this
+ * function before joinrels have been built. (The caller is expected to
+ * also supply the joinrelids, just to save recalculating that.)
+ *
+ * The proof must be made based only on clauses that will be "joinquals"
+ * rather than "otherquals" at execution. For an inner join there's no
+ * difference; but if the join is outer, we must ignore pushed-down quals,
+ * as those will become "otherquals". Note that this means the answer might
+ * vary depending on whether IS_OUTER_JOIN(jointype); since we cache the
+ * answer without regard to that, callers must take care not to call this
+ * with jointypes that would be classified differently by IS_OUTER_JOIN().
+ *
+ * The actual proof is undertaken by is_innerrel_unique_for(); this function
+ * is a frontend that is mainly concerned with caching the answers.
+ * In particular, the force_cache argument allows overriding the internal
+ * heuristic about whether to cache negative answers; it should be "true"
+ * if making an inquiry that is not part of the normal bottom-up join search
+ * sequence.
+ */
+bool
+innerrel_is_unique(PlannerInfo *root,
+ Relids joinrelids,
+ Relids outerrelids,
+ RelOptInfo *innerrel,
+ JoinType jointype,
+ List *restrictlist,
+ bool force_cache)
+{
+ MemoryContext old_context;
+ ListCell *lc;
+
+ /* Certainly can't prove uniqueness when there are no joinclauses */
+ if (restrictlist == NIL)
+ return false;
+
+ /*
+ * Make a quick check to eliminate cases in which we will surely be unable
+ * to prove uniqueness of the innerrel.
+ */
+ if (!rel_supports_distinctness(root, innerrel))
+ return false;
+
+ /*
+ * Query the cache to see if we've managed to prove that innerrel is
+ * unique for any subset of this outerrel. We don't need an exact match,
+ * as extra outerrels can't make the innerrel any less unique (or more
+ * formally, the restrictlist for a join to a superset outerrel must be a
+ * superset of the conditions we successfully used before).
+ */
+ foreach(lc, innerrel->unique_for_rels)
+ {
+ Relids unique_for_rels = (Relids) lfirst(lc);
+
+ if (bms_is_subset(unique_for_rels, outerrelids))
+ return true; /* Success! */
+ }
+
+ /*
+ * Conversely, we may have already determined that this outerrel, or some
+ * superset thereof, cannot prove this innerrel to be unique.
+ */
+ foreach(lc, innerrel->non_unique_for_rels)
+ {
+ Relids unique_for_rels = (Relids) lfirst(lc);
+
+ if (bms_is_subset(outerrelids, unique_for_rels))
+ return false;
+ }
+
+ /* No cached information, so try to make the proof. */
+ if (is_innerrel_unique_for(root, joinrelids, outerrelids, innerrel,
+ jointype, restrictlist))
+ {
+ /*
+ * Cache the positive result for future probes, being sure to keep it
+ * in the planner_cxt even if we are working in GEQO.
+ *
+ * Note: one might consider trying to isolate the minimal subset of
+ * the outerrels that proved the innerrel unique. But it's not worth
+ * the trouble, because the planner builds up joinrels incrementally
+ * and so we'll see the minimally sufficient outerrels before any
+ * supersets of them anyway.
+ */
+ old_context = MemoryContextSwitchTo(root->planner_cxt);
+ innerrel->unique_for_rels = lappend(innerrel->unique_for_rels,
+ bms_copy(outerrelids));
+ MemoryContextSwitchTo(old_context);
+
+ return true; /* Success! */
+ }
+ else
+ {
+ /*
+ * None of the join conditions for outerrel proved innerrel unique, so
+ * we can safely reject this outerrel or any subset of it in future
+ * checks.
+ *
+ * However, in normal planning mode, caching this knowledge is totally
+ * pointless; it won't be queried again, because we build up joinrels
+ * from smaller to larger. It is useful in GEQO mode, where the
+ * knowledge can be carried across successive planning attempts; and
+ * it's likely to be useful when using join-search plugins, too. Hence
+ * cache when join_search_private is non-NULL. (Yeah, that's a hack,
+ * but it seems reasonable.)
+ *
+ * Also, allow callers to override that heuristic and force caching;
+ * that's useful for reduce_unique_semijoins, which calls here before
+ * the normal join search starts.
+ */
+ if (force_cache || root->join_search_private)
+ {
+ old_context = MemoryContextSwitchTo(root->planner_cxt);
+ innerrel->non_unique_for_rels =
+ lappend(innerrel->non_unique_for_rels,
+ bms_copy(outerrelids));
+ MemoryContextSwitchTo(old_context);
+ }
+
+ return false;
+ }
+}
+
+/*
+ * is_innerrel_unique_for
+ * Check if the innerrel provably contains at most one tuple matching any
+ * tuple from the outerrel, based on join clauses in the 'restrictlist'.
+ */
+static bool
+is_innerrel_unique_for(PlannerInfo *root,
+ Relids joinrelids,
+ Relids outerrelids,
+ RelOptInfo *innerrel,
+ JoinType jointype,
+ List *restrictlist)
+{
+ List *clause_list = NIL;
+ ListCell *lc;
+
+ /*
+ * Search for mergejoinable clauses that constrain the inner rel against
+ * the outer rel. If an operator is mergejoinable then it behaves like
+ * equality for some btree opclass, so it's what we want. The
+ * mergejoinability test also eliminates clauses containing volatile
+ * functions, which we couldn't depend on.
+ */
+ foreach(lc, restrictlist)
+ {
+ RestrictInfo *restrictinfo = (RestrictInfo *) lfirst(lc);
+
+ /*
+ * As noted above, if it's a pushed-down clause and we're at an outer
+ * join, we can't use it.
+ */
+ if (IS_OUTER_JOIN(jointype) &&
+ RINFO_IS_PUSHED_DOWN(restrictinfo, joinrelids))
+ continue;
+
+ /* Ignore if it's not a mergejoinable clause */
+ if (!restrictinfo->can_join ||
+ restrictinfo->mergeopfamilies == NIL)
+ continue; /* not mergejoinable */
+
+ /*
+ * Check if clause has the form "outer op inner" or "inner op outer",
+ * and if so mark which side is inner.
+ */
+ if (!clause_sides_match_join(restrictinfo, outerrelids,
+ innerrel->relids))
+ continue; /* no good for these input relations */
+
+ /* OK, add to list */
+ clause_list = lappend(clause_list, restrictinfo);
+ }
+
+ /* Let rel_is_distinct_for() do the hard work */
+ return rel_is_distinct_for(root, innerrel, clause_list);
+}
diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c
new file mode 100644
index 0000000..1bc59c9
--- /dev/null
+++ b/src/backend/optimizer/plan/createplan.c
@@ -0,0 +1,7244 @@
+/*-------------------------------------------------------------------------
+ *
+ * createplan.c
+ * Routines to create the desired plan for processing a query.
+ * Planning is complete, we just need to convert the selected
+ * Path into a Plan.
+ *
+ * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ * src/backend/optimizer/plan/createplan.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include <math.h>
+
+#include "access/sysattr.h"
+#include "catalog/pg_class.h"
+#include "foreign/fdwapi.h"
+#include "miscadmin.h"
+#include "nodes/extensible.h"
+#include "nodes/makefuncs.h"
+#include "nodes/nodeFuncs.h"
+#include "optimizer/clauses.h"
+#include "optimizer/cost.h"
+#include "optimizer/optimizer.h"
+#include "optimizer/paramassign.h"
+#include "optimizer/paths.h"
+#include "optimizer/placeholder.h"
+#include "optimizer/plancat.h"
+#include "optimizer/planmain.h"
+#include "optimizer/prep.h"
+#include "optimizer/restrictinfo.h"
+#include "optimizer/subselect.h"
+#include "optimizer/tlist.h"
+#include "parser/parse_clause.h"
+#include "parser/parsetree.h"
+#include "partitioning/partprune.h"
+#include "utils/lsyscache.h"
+
+
+/*
+ * Flag bits that can appear in the flags argument of create_plan_recurse().
+ * These can be OR-ed together.
+ *
+ * CP_EXACT_TLIST specifies that the generated plan node must return exactly
+ * the tlist specified by the path's pathtarget (this overrides both
+ * CP_SMALL_TLIST and CP_LABEL_TLIST, if those are set). Otherwise, the
+ * plan node is allowed to return just the Vars and PlaceHolderVars needed
+ * to evaluate the pathtarget.
+ *
+ * CP_SMALL_TLIST specifies that a narrower tlist is preferred. This is
+ * passed down by parent nodes such as Sort and Hash, which will have to
+ * store the returned tuples.
+ *
+ * CP_LABEL_TLIST specifies that the plan node must return columns matching
+ * any sortgrouprefs specified in its pathtarget, with appropriate
+ * ressortgroupref labels. This is passed down by parent nodes such as Sort
+ * and Group, which need these values to be available in their inputs.
+ *
+ * CP_IGNORE_TLIST specifies that the caller plans to replace the targetlist,
+ * and therefore it doesn't matter a bit what target list gets generated.
+ */
+#define CP_EXACT_TLIST 0x0001 /* Plan must return specified tlist */
+#define CP_SMALL_TLIST 0x0002 /* Prefer narrower tlists */
+#define CP_LABEL_TLIST 0x0004 /* tlist must contain sortgrouprefs */
+#define CP_IGNORE_TLIST 0x0008 /* caller will replace tlist */
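+
+/*
+ * For illustration (editorial note, not from the original source): a parent
+ * node that must store its input and also needs sort/group labels could
+ * request, for example,
+ *		create_plan_recurse(root, subpath, CP_SMALL_TLIST | CP_LABEL_TLIST);
+ * while create_plan() itself always demands CP_EXACT_TLIST at the top.
+ */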
+
+
+static Plan *create_plan_recurse(PlannerInfo *root, Path *best_path,
+ int flags);
+static Plan *create_scan_plan(PlannerInfo *root, Path *best_path,
+ int flags);
+static List *build_path_tlist(PlannerInfo *root, Path *path);
+static bool use_physical_tlist(PlannerInfo *root, Path *path, int flags);
+static List *get_gating_quals(PlannerInfo *root, List *quals);
+static Plan *create_gating_plan(PlannerInfo *root, Path *path, Plan *plan,
+ List *gating_quals);
+static Plan *create_join_plan(PlannerInfo *root, JoinPath *best_path);
+static bool mark_async_capable_plan(Plan *plan, Path *path);
+static Plan *create_append_plan(PlannerInfo *root, AppendPath *best_path,
+ int flags);
+static Plan *create_merge_append_plan(PlannerInfo *root, MergeAppendPath *best_path,
+ int flags);
+static Result *create_group_result_plan(PlannerInfo *root,
+ GroupResultPath *best_path);
+static ProjectSet *create_project_set_plan(PlannerInfo *root, ProjectSetPath *best_path);
+static Material *create_material_plan(PlannerInfo *root, MaterialPath *best_path,
+ int flags);
+static Memoize *create_memoize_plan(PlannerInfo *root, MemoizePath *best_path,
+ int flags);
+static Plan *create_unique_plan(PlannerInfo *root, UniquePath *best_path,
+ int flags);
+static Gather *create_gather_plan(PlannerInfo *root, GatherPath *best_path);
+static Plan *create_projection_plan(PlannerInfo *root,
+ ProjectionPath *best_path,
+ int flags);
+static Plan *inject_projection_plan(Plan *subplan, List *tlist, bool parallel_safe);
+static Sort *create_sort_plan(PlannerInfo *root, SortPath *best_path, int flags);
+static IncrementalSort *create_incrementalsort_plan(PlannerInfo *root,
+ IncrementalSortPath *best_path, int flags);
+static Group *create_group_plan(PlannerInfo *root, GroupPath *best_path);
+static Unique *create_upper_unique_plan(PlannerInfo *root, UpperUniquePath *best_path,
+ int flags);
+static Agg *create_agg_plan(PlannerInfo *root, AggPath *best_path);
+static Plan *create_groupingsets_plan(PlannerInfo *root, GroupingSetsPath *best_path);
+static Result *create_minmaxagg_plan(PlannerInfo *root, MinMaxAggPath *best_path);
+static WindowAgg *create_windowagg_plan(PlannerInfo *root, WindowAggPath *best_path);
+static SetOp *create_setop_plan(PlannerInfo *root, SetOpPath *best_path,
+ int flags);
+static RecursiveUnion *create_recursiveunion_plan(PlannerInfo *root, RecursiveUnionPath *best_path);
+static LockRows *create_lockrows_plan(PlannerInfo *root, LockRowsPath *best_path,
+ int flags);
+static ModifyTable *create_modifytable_plan(PlannerInfo *root, ModifyTablePath *best_path);
+static Limit *create_limit_plan(PlannerInfo *root, LimitPath *best_path,
+ int flags);
+static SeqScan *create_seqscan_plan(PlannerInfo *root, Path *best_path,
+ List *tlist, List *scan_clauses);
+static SampleScan *create_samplescan_plan(PlannerInfo *root, Path *best_path,
+ List *tlist, List *scan_clauses);
+static Scan *create_indexscan_plan(PlannerInfo *root, IndexPath *best_path,
+ List *tlist, List *scan_clauses, bool indexonly);
+static BitmapHeapScan *create_bitmap_scan_plan(PlannerInfo *root,
+ BitmapHeapPath *best_path,
+ List *tlist, List *scan_clauses);
+static Plan *create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual,
+ List **qual, List **indexqual, List **indexECs);
+static void bitmap_subplan_mark_shared(Plan *plan);
+static TidScan *create_tidscan_plan(PlannerInfo *root, TidPath *best_path,
+ List *tlist, List *scan_clauses);
+static TidRangeScan *create_tidrangescan_plan(PlannerInfo *root,
+ TidRangePath *best_path,
+ List *tlist,
+ List *scan_clauses);
+static SubqueryScan *create_subqueryscan_plan(PlannerInfo *root,
+ SubqueryScanPath *best_path,
+ List *tlist, List *scan_clauses);
+static FunctionScan *create_functionscan_plan(PlannerInfo *root, Path *best_path,
+ List *tlist, List *scan_clauses);
+static ValuesScan *create_valuesscan_plan(PlannerInfo *root, Path *best_path,
+ List *tlist, List *scan_clauses);
+static TableFuncScan *create_tablefuncscan_plan(PlannerInfo *root, Path *best_path,
+ List *tlist, List *scan_clauses);
+static CteScan *create_ctescan_plan(PlannerInfo *root, Path *best_path,
+ List *tlist, List *scan_clauses);
+static NamedTuplestoreScan *create_namedtuplestorescan_plan(PlannerInfo *root,
+ Path *best_path, List *tlist, List *scan_clauses);
+static Result *create_resultscan_plan(PlannerInfo *root, Path *best_path,
+ List *tlist, List *scan_clauses);
+static WorkTableScan *create_worktablescan_plan(PlannerInfo *root, Path *best_path,
+ List *tlist, List *scan_clauses);
+static ForeignScan *create_foreignscan_plan(PlannerInfo *root, ForeignPath *best_path,
+ List *tlist, List *scan_clauses);
+static CustomScan *create_customscan_plan(PlannerInfo *root,
+ CustomPath *best_path,
+ List *tlist, List *scan_clauses);
+static NestLoop *create_nestloop_plan(PlannerInfo *root, NestPath *best_path);
+static MergeJoin *create_mergejoin_plan(PlannerInfo *root, MergePath *best_path);
+static HashJoin *create_hashjoin_plan(PlannerInfo *root, HashPath *best_path);
+static Node *replace_nestloop_params(PlannerInfo *root, Node *expr);
+static Node *replace_nestloop_params_mutator(Node *node, PlannerInfo *root);
+static void fix_indexqual_references(PlannerInfo *root, IndexPath *index_path,
+ List **stripped_indexquals_p,
+ List **fixed_indexquals_p);
+static List *fix_indexorderby_references(PlannerInfo *root, IndexPath *index_path);
+static Node *fix_indexqual_clause(PlannerInfo *root,
+ IndexOptInfo *index, int indexcol,
+ Node *clause, List *indexcolnos);
+static Node *fix_indexqual_operand(Node *node, IndexOptInfo *index, int indexcol);
+static List *get_switched_clauses(List *clauses, Relids outerrelids);
+static List *order_qual_clauses(PlannerInfo *root, List *clauses);
+static void copy_generic_path_info(Plan *dest, Path *src);
+static void copy_plan_costsize(Plan *dest, Plan *src);
+static void label_sort_with_costsize(PlannerInfo *root, Sort *plan,
+ double limit_tuples);
+static SeqScan *make_seqscan(List *qptlist, List *qpqual, Index scanrelid);
+static SampleScan *make_samplescan(List *qptlist, List *qpqual, Index scanrelid,
+ TableSampleClause *tsc);
+static IndexScan *make_indexscan(List *qptlist, List *qpqual, Index scanrelid,
+ Oid indexid, List *indexqual, List *indexqualorig,
+ List *indexorderby, List *indexorderbyorig,
+ List *indexorderbyops,
+ ScanDirection indexscandir);
+static IndexOnlyScan *make_indexonlyscan(List *qptlist, List *qpqual,
+ Index scanrelid, Oid indexid,
+ List *indexqual, List *recheckqual,
+ List *indexorderby,
+ List *indextlist,
+ ScanDirection indexscandir);
+static BitmapIndexScan *make_bitmap_indexscan(Index scanrelid, Oid indexid,
+ List *indexqual,
+ List *indexqualorig);
+static BitmapHeapScan *make_bitmap_heapscan(List *qptlist,
+ List *qpqual,
+ Plan *lefttree,
+ List *bitmapqualorig,
+ Index scanrelid);
+static TidScan *make_tidscan(List *qptlist, List *qpqual, Index scanrelid,
+ List *tidquals);
+static TidRangeScan *make_tidrangescan(List *qptlist, List *qpqual,
+ Index scanrelid, List *tidrangequals);
+static SubqueryScan *make_subqueryscan(List *qptlist,
+ List *qpqual,
+ Index scanrelid,
+ Plan *subplan);
+static FunctionScan *make_functionscan(List *qptlist, List *qpqual,
+ Index scanrelid, List *functions, bool funcordinality);
+static ValuesScan *make_valuesscan(List *qptlist, List *qpqual,
+ Index scanrelid, List *values_lists);
+static TableFuncScan *make_tablefuncscan(List *qptlist, List *qpqual,
+ Index scanrelid, TableFunc *tablefunc);
+static CteScan *make_ctescan(List *qptlist, List *qpqual,
+ Index scanrelid, int ctePlanId, int cteParam);
+static NamedTuplestoreScan *make_namedtuplestorescan(List *qptlist, List *qpqual,
+ Index scanrelid, char *enrname);
+static WorkTableScan *make_worktablescan(List *qptlist, List *qpqual,
+ Index scanrelid, int wtParam);
+static RecursiveUnion *make_recursive_union(List *tlist,
+ Plan *lefttree,
+ Plan *righttree,
+ int wtParam,
+ List *distinctList,
+ long numGroups);
+static BitmapAnd *make_bitmap_and(List *bitmapplans);
+static BitmapOr *make_bitmap_or(List *bitmapplans);
+static NestLoop *make_nestloop(List *tlist,
+ List *joinclauses, List *otherclauses, List *nestParams,
+ Plan *lefttree, Plan *righttree,
+ JoinType jointype, bool inner_unique);
+static HashJoin *make_hashjoin(List *tlist,
+ List *joinclauses, List *otherclauses,
+ List *hashclauses,
+ List *hashoperators, List *hashcollations,
+ List *hashkeys,
+ Plan *lefttree, Plan *righttree,
+ JoinType jointype, bool inner_unique);
+static Hash *make_hash(Plan *lefttree,
+ List *hashkeys,
+ Oid skewTable,
+ AttrNumber skewColumn,
+ bool skewInherit);
+static MergeJoin *make_mergejoin(List *tlist,
+ List *joinclauses, List *otherclauses,
+ List *mergeclauses,
+ Oid *mergefamilies,
+ Oid *mergecollations,
+ int *mergestrategies,
+ bool *mergenullsfirst,
+ Plan *lefttree, Plan *righttree,
+ JoinType jointype, bool inner_unique,
+ bool skip_mark_restore);
+static Sort *make_sort(Plan *lefttree, int numCols,
+ AttrNumber *sortColIdx, Oid *sortOperators,
+ Oid *collations, bool *nullsFirst);
+static IncrementalSort *make_incrementalsort(Plan *lefttree,
+ int numCols, int nPresortedCols,
+ AttrNumber *sortColIdx, Oid *sortOperators,
+ Oid *collations, bool *nullsFirst);
+static Plan *prepare_sort_from_pathkeys(Plan *lefttree, List *pathkeys,
+ Relids relids,
+ const AttrNumber *reqColIdx,
+ bool adjust_tlist_in_place,
+ int *p_numsortkeys,
+ AttrNumber **p_sortColIdx,
+ Oid **p_sortOperators,
+ Oid **p_collations,
+ bool **p_nullsFirst);
+static Sort *make_sort_from_pathkeys(Plan *lefttree, List *pathkeys,
+ Relids relids);
+static IncrementalSort *make_incrementalsort_from_pathkeys(Plan *lefttree,
+ List *pathkeys, Relids relids, int nPresortedCols);
+static Sort *make_sort_from_groupcols(List *groupcls,
+ AttrNumber *grpColIdx,
+ Plan *lefttree);
+static Material *make_material(Plan *lefttree);
+static Memoize *make_memoize(Plan *lefttree, Oid *hashoperators,
+ Oid *collations, List *param_exprs,
+ bool singlerow, bool binary_mode,
+ uint32 est_entries, Bitmapset *keyparamids);
+static WindowAgg *make_windowagg(List *tlist, Index winref,
+ int partNumCols, AttrNumber *partColIdx, Oid *partOperators, Oid *partCollations,
+ int ordNumCols, AttrNumber *ordColIdx, Oid *ordOperators, Oid *ordCollations,
+ int frameOptions, Node *startOffset, Node *endOffset,
+ Oid startInRangeFunc, Oid endInRangeFunc,
+ Oid inRangeColl, bool inRangeAsc, bool inRangeNullsFirst,
+ List *runCondition, List *qual, bool topWindow,
+ Plan *lefttree);
+static Group *make_group(List *tlist, List *qual, int numGroupCols,
+ AttrNumber *grpColIdx, Oid *grpOperators, Oid *grpCollations,
+ Plan *lefttree);
+static Unique *make_unique_from_sortclauses(Plan *lefttree, List *distinctList);
+static Unique *make_unique_from_pathkeys(Plan *lefttree,
+ List *pathkeys, int numCols);
+static Gather *make_gather(List *qptlist, List *qpqual,
+ int nworkers, int rescan_param, bool single_copy, Plan *subplan);
+static SetOp *make_setop(SetOpCmd cmd, SetOpStrategy strategy, Plan *lefttree,
+ List *distinctList, AttrNumber flagColIdx, int firstFlag,
+ long numGroups);
+static LockRows *make_lockrows(Plan *lefttree, List *rowMarks, int epqParam);
+static Result *make_result(List *tlist, Node *resconstantqual, Plan *subplan);
+static ProjectSet *make_project_set(List *tlist, Plan *subplan);
+static ModifyTable *make_modifytable(PlannerInfo *root, Plan *subplan,
+ CmdType operation, bool canSetTag,
+ Index nominalRelation, Index rootRelation,
+ bool partColsUpdated,
+ List *resultRelations,
+ List *updateColnosLists,
+ List *withCheckOptionLists, List *returningLists,
+ List *rowMarks, OnConflictExpr *onconflict,
+ List *mergeActionList, int epqParam);
+static GatherMerge *create_gather_merge_plan(PlannerInfo *root,
+ GatherMergePath *best_path);
+
+
+/*
+ * create_plan
+ * Creates the access plan for a query by recursively processing the
+ * desired tree of pathnodes, starting at the node 'best_path'. For
+ * every pathnode found, we create a corresponding plan node containing
+ * appropriate id, target list, and qualification information.
+ *
+ * The tlists and quals in the plan tree are still in planner format,
+ * ie, Vars still correspond to the parser's numbering. This will be
+ * fixed later by setrefs.c.
+ *
+ * best_path is the best access path
+ *
+ * Returns a Plan tree.
+ */
+Plan *
+create_plan(PlannerInfo *root, Path *best_path)
+{
+ Plan *plan;
+
+ /* plan_params should not be in use in current query level */
+ Assert(root->plan_params == NIL);
+
+ /* Initialize this module's workspace in PlannerInfo */
+ root->curOuterRels = NULL;
+ root->curOuterParams = NIL;
+
+ /* Recursively process the path tree, demanding the correct tlist result */
+ plan = create_plan_recurse(root, best_path, CP_EXACT_TLIST);
+
+ /*
+ * Make sure the topmost plan node's targetlist exposes the original
+ * column names and other decorative info. Targetlists generated within
+ * the planner don't bother with that stuff, but we must have it on the
+ * top-level tlist seen at execution time. However, ModifyTable plan
+ * nodes don't have a tlist matching the querytree targetlist.
+ */
+ if (!IsA(plan, ModifyTable))
+ apply_tlist_labeling(plan->targetlist, root->processed_tlist);
+
+ /*
+ * Attach any initPlans created in this query level to the topmost plan
+ * node. (In principle the initplans could go in any plan node at or
+ * above where they're referenced, but there seems no reason to put them
+ * any lower than the topmost node for the query level. Also, see
+ * comments for SS_finalize_plan before you try to change this.)
+ */
+ SS_attach_initplans(root, plan);
+
+ /* Check we successfully assigned all NestLoopParams to plan nodes */
+ if (root->curOuterParams != NIL)
+ elog(ERROR, "failed to assign all NestLoopParams to plan nodes");
+
+ /*
+ * Reset plan_params to ensure param IDs used for nestloop params are not
+ * re-used later
+ */
+ root->plan_params = NIL;
+
+ return plan;
+}
+
+/*
+ * create_plan_recurse
+ * Recursive guts of create_plan().
+ */
+static Plan *
+create_plan_recurse(PlannerInfo *root, Path *best_path, int flags)
+{
+ Plan *plan;
+
+ /* Guard against stack overflow due to overly complex plans */
+ check_stack_depth();
+
+ switch (best_path->pathtype)
+ {
+ case T_SeqScan:
+ case T_SampleScan:
+ case T_IndexScan:
+ case T_IndexOnlyScan:
+ case T_BitmapHeapScan:
+ case T_TidScan:
+ case T_TidRangeScan:
+ case T_SubqueryScan:
+ case T_FunctionScan:
+ case T_TableFuncScan:
+ case T_ValuesScan:
+ case T_CteScan:
+ case T_WorkTableScan:
+ case T_NamedTuplestoreScan:
+ case T_ForeignScan:
+ case T_CustomScan:
+ plan = create_scan_plan(root, best_path, flags);
+ break;
+ case T_HashJoin:
+ case T_MergeJoin:
+ case T_NestLoop:
+ plan = create_join_plan(root,
+ (JoinPath *) best_path);
+ break;
+ case T_Append:
+ plan = create_append_plan(root,
+ (AppendPath *) best_path,
+ flags);
+ break;
+ case T_MergeAppend:
+ plan = create_merge_append_plan(root,
+ (MergeAppendPath *) best_path,
+ flags);
+ break;
+ case T_Result:
+ if (IsA(best_path, ProjectionPath))
+ {
+ plan = create_projection_plan(root,
+ (ProjectionPath *) best_path,
+ flags);
+ }
+ else if (IsA(best_path, MinMaxAggPath))
+ {
+ plan = (Plan *) create_minmaxagg_plan(root,
+ (MinMaxAggPath *) best_path);
+ }
+ else if (IsA(best_path, GroupResultPath))
+ {
+ plan = (Plan *) create_group_result_plan(root,
+ (GroupResultPath *) best_path);
+ }
+ else
+ {
+ /* Simple RTE_RESULT base relation */
+ Assert(IsA(best_path, Path));
+ plan = create_scan_plan(root, best_path, flags);
+ }
+ break;
+ case T_ProjectSet:
+ plan = (Plan *) create_project_set_plan(root,
+ (ProjectSetPath *) best_path);
+ break;
+ case T_Material:
+ plan = (Plan *) create_material_plan(root,
+ (MaterialPath *) best_path,
+ flags);
+ break;
+ case T_Memoize:
+ plan = (Plan *) create_memoize_plan(root,
+ (MemoizePath *) best_path,
+ flags);
+ break;
+ case T_Unique:
+ if (IsA(best_path, UpperUniquePath))
+ {
+ plan = (Plan *) create_upper_unique_plan(root,
+ (UpperUniquePath *) best_path,
+ flags);
+ }
+ else
+ {
+ Assert(IsA(best_path, UniquePath));
+ plan = create_unique_plan(root,
+ (UniquePath *) best_path,
+ flags);
+ }
+ break;
+ case T_Gather:
+ plan = (Plan *) create_gather_plan(root,
+ (GatherPath *) best_path);
+ break;
+ case T_Sort:
+ plan = (Plan *) create_sort_plan(root,
+ (SortPath *) best_path,
+ flags);
+ break;
+ case T_IncrementalSort:
+ plan = (Plan *) create_incrementalsort_plan(root,
+ (IncrementalSortPath *) best_path,
+ flags);
+ break;
+ case T_Group:
+ plan = (Plan *) create_group_plan(root,
+ (GroupPath *) best_path);
+ break;
+ case T_Agg:
+ if (IsA(best_path, GroupingSetsPath))
+ plan = create_groupingsets_plan(root,
+ (GroupingSetsPath *) best_path);
+ else
+ {
+ Assert(IsA(best_path, AggPath));
+ plan = (Plan *) create_agg_plan(root,
+ (AggPath *) best_path);
+ }
+ break;
+ case T_WindowAgg:
+ plan = (Plan *) create_windowagg_plan(root,
+ (WindowAggPath *) best_path);
+ break;
+ case T_SetOp:
+ plan = (Plan *) create_setop_plan(root,
+ (SetOpPath *) best_path,
+ flags);
+ break;
+ case T_RecursiveUnion:
+ plan = (Plan *) create_recursiveunion_plan(root,
+ (RecursiveUnionPath *) best_path);
+ break;
+ case T_LockRows:
+ plan = (Plan *) create_lockrows_plan(root,
+ (LockRowsPath *) best_path,
+ flags);
+ break;
+ case T_ModifyTable:
+ plan = (Plan *) create_modifytable_plan(root,
+ (ModifyTablePath *) best_path);
+ break;
+ case T_Limit:
+ plan = (Plan *) create_limit_plan(root,
+ (LimitPath *) best_path,
+ flags);
+ break;
+ case T_GatherMerge:
+ plan = (Plan *) create_gather_merge_plan(root,
+ (GatherMergePath *) best_path);
+ break;
+ default:
+ elog(ERROR, "unrecognized node type: %d",
+ (int) best_path->pathtype);
+ plan = NULL; /* keep compiler quiet */
+ break;
+ }
+
+ return plan;
+}
+
+/*
+ * create_scan_plan
+ * Create a scan plan for the parent relation of 'best_path'.
+ */
+static Plan *
+create_scan_plan(PlannerInfo *root, Path *best_path, int flags)
+{
+ RelOptInfo *rel = best_path->parent;
+ List *scan_clauses;
+ List *gating_clauses;
+ List *tlist;
+ Plan *plan;
+
+ /*
+ * Extract the relevant restriction clauses from the parent relation. The
+ * executor must apply all these restrictions during the scan, except for
+ * pseudoconstants which we'll take care of below.
+ *
+ * If this is a plain indexscan or index-only scan, we need not consider
+ * restriction clauses that are implied by the index's predicate, so use
+ * indrestrictinfo not baserestrictinfo. Note that we can't do that for
+ * bitmap indexscans, since there's not necessarily a single index
+ * involved; but it doesn't matter since create_bitmap_scan_plan() will be
+ * able to get rid of such clauses anyway via predicate proof.
+ */
+ switch (best_path->pathtype)
+ {
+ case T_IndexScan:
+ case T_IndexOnlyScan:
+ scan_clauses = castNode(IndexPath, best_path)->indexinfo->indrestrictinfo;
+ break;
+ default:
+ scan_clauses = rel->baserestrictinfo;
+ break;
+ }
+
+ /*
+ * If this is a parameterized scan, we also need to enforce all the join
+ * clauses available from the outer relation(s).
+ *
+ * For paranoia's sake, don't modify the stored baserestrictinfo list.
+ */
+ if (best_path->param_info)
+ scan_clauses = list_concat_copy(scan_clauses,
+ best_path->param_info->ppi_clauses);
+
+ /*
+ * Detect whether we have any pseudoconstant quals to deal with. Then, if
+ * we'll need a gating Result node, it will be able to project, so there
+ * are no requirements on the child's tlist.
+ */
+ gating_clauses = get_gating_quals(root, scan_clauses);
+ if (gating_clauses)
+ flags = 0;
+
+ /*
+ * For table scans, rather than using the relation targetlist (which is
+ * only those Vars actually needed by the query), we prefer to generate a
+ * tlist containing all Vars in order. This will allow the executor to
+ * optimize away projection of the table tuples, if possible.
+ *
+ * But if the caller is going to ignore our tlist anyway, then don't
+ * bother generating one at all. We use an exact equality test here, so
+ * that this only applies when CP_IGNORE_TLIST is the only flag set.
+ */
+ if (flags == CP_IGNORE_TLIST)
+ {
+ tlist = NULL;
+ }
+ else if (use_physical_tlist(root, best_path, flags))
+ {
+ if (best_path->pathtype == T_IndexOnlyScan)
+ {
+ /* For index-only scan, the preferred tlist is the index's */
+ tlist = copyObject(((IndexPath *) best_path)->indexinfo->indextlist);
+
+ /*
+ * Transfer sortgroupref data to the replacement tlist, if
+ * requested (use_physical_tlist checked that this will work).
+ */
+ if (flags & CP_LABEL_TLIST)
+ apply_pathtarget_labeling_to_tlist(tlist, best_path->pathtarget);
+ }
+ else
+ {
+ tlist = build_physical_tlist(root, rel);
+ if (tlist == NIL)
+ {
+ /* Failed because of dropped cols, so use regular method */
+ tlist = build_path_tlist(root, best_path);
+ }
+ else
+ {
+ /* As above, transfer sortgroupref data to replacement tlist */
+ if (flags & CP_LABEL_TLIST)
+ apply_pathtarget_labeling_to_tlist(tlist, best_path->pathtarget);
+ }
+ }
+ }
+ else
+ {
+ tlist = build_path_tlist(root, best_path);
+ }
+
+ switch (best_path->pathtype)
+ {
+ case T_SeqScan:
+ plan = (Plan *) create_seqscan_plan(root,
+ best_path,
+ tlist,
+ scan_clauses);
+ break;
+
+ case T_SampleScan:
+ plan = (Plan *) create_samplescan_plan(root,
+ best_path,
+ tlist,
+ scan_clauses);
+ break;
+
+ case T_IndexScan:
+ plan = (Plan *) create_indexscan_plan(root,
+ (IndexPath *) best_path,
+ tlist,
+ scan_clauses,
+ false);
+ break;
+
+ case T_IndexOnlyScan:
+ plan = (Plan *) create_indexscan_plan(root,
+ (IndexPath *) best_path,
+ tlist,
+ scan_clauses,
+ true);
+ break;
+
+ case T_BitmapHeapScan:
+ plan = (Plan *) create_bitmap_scan_plan(root,
+ (BitmapHeapPath *) best_path,
+ tlist,
+ scan_clauses);
+ break;
+
+ case T_TidScan:
+ plan = (Plan *) create_tidscan_plan(root,
+ (TidPath *) best_path,
+ tlist,
+ scan_clauses);
+ break;
+
+ case T_TidRangeScan:
+ plan = (Plan *) create_tidrangescan_plan(root,
+ (TidRangePath *) best_path,
+ tlist,
+ scan_clauses);
+ break;
+
+ case T_SubqueryScan:
+ plan = (Plan *) create_subqueryscan_plan(root,
+ (SubqueryScanPath *) best_path,
+ tlist,
+ scan_clauses);
+ break;
+
+ case T_FunctionScan:
+ plan = (Plan *) create_functionscan_plan(root,
+ best_path,
+ tlist,
+ scan_clauses);
+ break;
+
+ case T_TableFuncScan:
+ plan = (Plan *) create_tablefuncscan_plan(root,
+ best_path,
+ tlist,
+ scan_clauses);
+ break;
+
+ case T_ValuesScan:
+ plan = (Plan *) create_valuesscan_plan(root,
+ best_path,
+ tlist,
+ scan_clauses);
+ break;
+
+ case T_CteScan:
+ plan = (Plan *) create_ctescan_plan(root,
+ best_path,
+ tlist,
+ scan_clauses);
+ break;
+
+ case T_NamedTuplestoreScan:
+ plan = (Plan *) create_namedtuplestorescan_plan(root,
+ best_path,
+ tlist,
+ scan_clauses);
+ break;
+
+ case T_Result:
+ plan = (Plan *) create_resultscan_plan(root,
+ best_path,
+ tlist,
+ scan_clauses);
+ break;
+
+ case T_WorkTableScan:
+ plan = (Plan *) create_worktablescan_plan(root,
+ best_path,
+ tlist,
+ scan_clauses);
+ break;
+
+ case T_ForeignScan:
+ plan = (Plan *) create_foreignscan_plan(root,
+ (ForeignPath *) best_path,
+ tlist,
+ scan_clauses);
+ break;
+
+ case T_CustomScan:
+ plan = (Plan *) create_customscan_plan(root,
+ (CustomPath *) best_path,
+ tlist,
+ scan_clauses);
+ break;
+
+ default:
+ elog(ERROR, "unrecognized node type: %d",
+ (int) best_path->pathtype);
+ plan = NULL; /* keep compiler quiet */
+ break;
+ }
+
+ /*
+ * If there are any pseudoconstant clauses attached to this node, insert a
+ * gating Result node that evaluates the pseudoconstants as one-time
+ * quals.
+ */
+ if (gating_clauses)
+ plan = create_gating_plan(root, best_path, plan, gating_clauses);
+
+ return plan;
+}
+
+/*
+ * Build a target list (ie, a list of TargetEntry) for the Path's output.
+ *
+ * This is almost just make_tlist_from_pathtarget(), but we also have to
+ * deal with replacing nestloop params.
+ */
+static List *
+build_path_tlist(PlannerInfo *root, Path *path)
+{
+ List *tlist = NIL;
+ Index *sortgrouprefs = path->pathtarget->sortgrouprefs;
+ int resno = 1;
+ ListCell *v;
+
+ foreach(v, path->pathtarget->exprs)
+ {
+ Node *node = (Node *) lfirst(v);
+ TargetEntry *tle;
+
+ /*
+ * If it's a parameterized path, there might be lateral references in
+ * the tlist, which need to be replaced with Params. There's no need
+ * to remake the TargetEntry nodes, so apply this to each list item
+ * separately.
+ */
+ if (path->param_info)
+ node = replace_nestloop_params(root, node);
+
+ tle = makeTargetEntry((Expr *) node,
+ resno,
+ NULL,
+ false);
+ if (sortgrouprefs)
+ tle->ressortgroupref = sortgrouprefs[resno - 1];
+
+ tlist = lappend(tlist, tle);
+ resno++;
+ }
+ return tlist;
+}
+
+/*
+ * use_physical_tlist
+ * Decide whether to use a tlist matching relation structure,
+ * rather than only those Vars actually referenced.
+ */
+static bool
+use_physical_tlist(PlannerInfo *root, Path *path, int flags)
+{
+ RelOptInfo *rel = path->parent;
+ int i;
+ ListCell *lc;
+
+ /*
+ * Forget it if either exact tlist or small tlist is demanded.
+ */
+ if (flags & (CP_EXACT_TLIST | CP_SMALL_TLIST))
+ return false;
+
+ /*
+ * We can do this for real relation scans, subquery scans, function scans,
+ * tablefunc scans, values scans, and CTE scans (but not for, eg, joins).
+ */
+ if (rel->rtekind != RTE_RELATION &&
+ rel->rtekind != RTE_SUBQUERY &&
+ rel->rtekind != RTE_FUNCTION &&
+ rel->rtekind != RTE_TABLEFUNC &&
+ rel->rtekind != RTE_VALUES &&
+ rel->rtekind != RTE_CTE)
+ return false;
+
+ /*
+ * Can't do it with inheritance cases either (mainly because Append
+ * doesn't project; this test may be unnecessary now that
+ * create_append_plan instructs its children to return an exact tlist).
+ */
+ if (rel->reloptkind != RELOPT_BASEREL)
+ return false;
+
+ /*
+ * Also, don't do it to a CustomPath; the premise that we're extracting
+ * columns from a simple physical tuple is unlikely to hold for those.
+ * (When it does make sense, the custom path creator can set up the path's
+ * pathtarget that way.)
+ */
+ if (IsA(path, CustomPath))
+ return false;
+
+ /*
+ * If a bitmap scan's tlist is empty, keep it as-is. This may allow the
+ * executor to skip heap page fetches, and in any case, the benefit of
+ * using a physical tlist instead would be minimal.
+ */
+ if (IsA(path, BitmapHeapPath) &&
+ path->pathtarget->exprs == NIL)
+ return false;
+
+ /*
+ * Can't do it if any system columns or whole-row Vars are requested.
+ * (This could possibly be fixed but would take some fragile assumptions
+ * in setrefs.c, I think.)
+ */
+ for (i = rel->min_attr; i <= 0; i++)
+ {
+ if (!bms_is_empty(rel->attr_needed[i - rel->min_attr]))
+ return false;
+ }
+
+ /*
+ * Can't do it if the rel is required to emit any placeholder expressions,
+ * either.
+ */
+ foreach(lc, root->placeholder_list)
+ {
+ PlaceHolderInfo *phinfo = (PlaceHolderInfo *) lfirst(lc);
+
+ if (bms_nonempty_difference(phinfo->ph_needed, rel->relids) &&
+ bms_is_subset(phinfo->ph_eval_at, rel->relids))
+ return false;
+ }
+
+ /*
+ * For an index-only scan, the "physical tlist" is the index's indextlist.
+ * We can only return that without a projection if all the index's columns
+ * are returnable.
+ */
+ if (path->pathtype == T_IndexOnlyScan)
+ {
+ IndexOptInfo *indexinfo = ((IndexPath *) path)->indexinfo;
+
+ for (i = 0; i < indexinfo->ncolumns; i++)
+ {
+ if (!indexinfo->canreturn[i])
+ return false;
+ }
+ }
+
+ /*
+ * Also, can't do it if CP_LABEL_TLIST is specified and path is requested
+ * to emit any sort/group columns that are not simple Vars. (If they are
+ * simple Vars, they should appear in the physical tlist, and
+ * apply_pathtarget_labeling_to_tlist will take care of getting them
+ * labeled again.) We also have to check that no two sort/group columns
+ * are the same Var, else that element of the physical tlist would need
+ * conflicting ressortgroupref labels.
+ */
+ if ((flags & CP_LABEL_TLIST) && path->pathtarget->sortgrouprefs)
+ {
+ Bitmapset *sortgroupatts = NULL;
+
+ i = 0;
+ foreach(lc, path->pathtarget->exprs)
+ {
+ Expr *expr = (Expr *) lfirst(lc);
+
+ if (path->pathtarget->sortgrouprefs[i])
+ {
+ if (expr && IsA(expr, Var))
+ {
+ int attno = ((Var *) expr)->varattno;
+
+ attno -= FirstLowInvalidHeapAttributeNumber;
+ if (bms_is_member(attno, sortgroupatts))
+ return false;
+ sortgroupatts = bms_add_member(sortgroupatts, attno);
+ }
+ else
+ return false;
+ }
+ i++;
+ }
+ }
+
+ return true;
+}
+
+/*
+ * get_gating_quals
+ * See if there are pseudoconstant quals in a node's quals list
+ *
+ * If the node's quals list includes any pseudoconstant quals,
+ * return just those quals.
+ */
+static List *
+get_gating_quals(PlannerInfo *root, List *quals)
+{
+ /* No need to look if we know there are no pseudoconstants */
+ if (!root->hasPseudoConstantQuals)
+ return NIL;
+
+ /* Sort into desirable execution order while still in RestrictInfo form */
+ quals = order_qual_clauses(root, quals);
+
+ /* Pull out any pseudoconstant quals from the RestrictInfo list */
+ return extract_actual_clauses(quals, true);
+}
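+
+/*
+ * For illustration (editorial note; the clause is hypothetical): a qual
+ * such as "WHERE $1 > 10", which references no Vars of the current query
+ * level and no volatile functions, is marked pseudoconstant.  It ends up
+ * here rather than in the ordinary scan or join quals, so that the gating
+ * Result node evaluates it just once per execution.
+ */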
+
+/*
+ * create_gating_plan
+ * Deal with pseudoconstant qual clauses
+ *
+ * Add a gating Result node atop the already-built plan.
+ */
+static Plan *
+create_gating_plan(PlannerInfo *root, Path *path, Plan *plan,
+ List *gating_quals)
+{
+ Plan *gplan;
+ Plan *splan;
+
+ Assert(gating_quals);
+
+ /*
+ * We might have a trivial Result plan already. Stacking one Result atop
+ * another is silly, so if that applies, just discard the input plan.
+ * (We're assuming its targetlist is uninteresting; it should be either
+ * the same as the result of build_path_tlist, or a simplified version.)
+ */
+ splan = plan;
+ if (IsA(plan, Result))
+ {
+ Result *rplan = (Result *) plan;
+
+ if (rplan->plan.lefttree == NULL &&
+ rplan->resconstantqual == NULL)
+ splan = NULL;
+ }
+
+ /*
+ * Since we need a Result node anyway, always return the path's requested
+ * tlist; that's never a wrong choice, even if the parent node didn't ask
+ * for CP_EXACT_TLIST.
+ */
+ gplan = (Plan *) make_result(build_path_tlist(root, path),
+ (Node *) gating_quals,
+ splan);
+
+ /*
+ * Notice that we don't change cost or size estimates when doing gating.
+ * The costs of qual eval were already included in the subplan's cost.
+ * Leaving the size alone amounts to assuming that the gating qual will
+ * succeed, which is the conservative estimate for planning upper queries.
+ * We certainly don't want to assume the output size is zero (unless the
+ * gating qual is actually constant FALSE, and that case is dealt with in
+ * clausesel.c). Interpolating between the two cases is silly, because it
+ * doesn't reflect what will really happen at runtime, and besides which
+ * in most cases we have only a very bad idea of the probability of the
+ * gating qual being true.
+ */
+ copy_plan_costsize(gplan, plan);
+
+ /* Gating quals could be unsafe, so better use the Path's safety flag */
+ gplan->parallel_safe = path->parallel_safe;
+
+ return gplan;
+}
+
+/*
+ * create_join_plan
+ * Create a join plan for 'best_path' and (recursively) plans for its
+ * inner and outer paths.
+ */
+static Plan *
+create_join_plan(PlannerInfo *root, JoinPath *best_path)
+{
+ Plan *plan;
+ List *gating_clauses;
+
+ switch (best_path->path.pathtype)
+ {
+ case T_MergeJoin:
+ plan = (Plan *) create_mergejoin_plan(root,
+ (MergePath *) best_path);
+ break;
+ case T_HashJoin:
+ plan = (Plan *) create_hashjoin_plan(root,
+ (HashPath *) best_path);
+ break;
+ case T_NestLoop:
+ plan = (Plan *) create_nestloop_plan(root,
+ (NestPath *) best_path);
+ break;
+ default:
+ elog(ERROR, "unrecognized node type: %d",
+ (int) best_path->path.pathtype);
+ plan = NULL; /* keep compiler quiet */
+ break;
+ }
+
+ /*
+ * If there are any pseudoconstant clauses attached to this node, insert a
+ * gating Result node that evaluates the pseudoconstants as one-time
+ * quals.
+ */
+ gating_clauses = get_gating_quals(root, best_path->joinrestrictinfo);
+ if (gating_clauses)
+ plan = create_gating_plan(root, (Path *) best_path, plan,
+ gating_clauses);
+
+#ifdef NOT_USED
+
+	/*
+	 * Expensive function pullups may have pulled local predicates into
+	 * this path node.  Put them in the qpqual of the plan node.
+	 * JMH, 6/15/92
+	 */
+ if (get_loc_restrictinfo(best_path) != NIL)
+ set_qpqual((Plan) plan,
+ list_concat(get_qpqual((Plan) plan),
+ get_actual_clauses(get_loc_restrictinfo(best_path))));
+#endif
+
+ return plan;
+}
+
+/*
+ * mark_async_capable_plan
+ * Check whether the Plan node created from a Path node is async-capable,
+ * and if so, mark the Plan node as such and return true, otherwise
+ * return false.
+ */
+static bool
+mark_async_capable_plan(Plan *plan, Path *path)
+{
+ switch (nodeTag(path))
+ {
+ case T_SubqueryScanPath:
+ {
+ SubqueryScan *scan_plan = (SubqueryScan *) plan;
+
+ /*
+ * If the generated plan node includes a gating Result node,
+ * we can't execute it asynchronously.
+ */
+ if (IsA(plan, Result))
+ return false;
+
+ /*
+				 * If a SubqueryScan node atop an async-capable plan node is
+				 * deletable, consider it async-capable.
+ */
+ if (trivial_subqueryscan(scan_plan) &&
+ mark_async_capable_plan(scan_plan->subplan,
+ ((SubqueryScanPath *) path)->subpath))
+ break;
+ return false;
+ }
+ case T_ForeignPath:
+ {
+ FdwRoutine *fdwroutine = path->parent->fdwroutine;
+
+ /*
+ * If the generated plan node includes a gating Result node,
+ * we can't execute it asynchronously.
+ */
+ if (IsA(plan, Result))
+ return false;
+
+ Assert(fdwroutine != NULL);
+ if (fdwroutine->IsForeignPathAsyncCapable != NULL &&
+ fdwroutine->IsForeignPathAsyncCapable((ForeignPath *) path))
+ break;
+ return false;
+ }
+ case T_ProjectionPath:
+
+ /*
+ * If the generated plan node includes a Result node for the
+ * projection, we can't execute it asynchronously.
+ */
+ if (IsA(plan, Result))
+ return false;
+
+ /*
+ * create_projection_plan() would have pulled up the subplan, so
+ * check the capability using the subpath.
+ */
+ if (mark_async_capable_plan(plan,
+ ((ProjectionPath *) path)->subpath))
+ return true;
+ return false;
+ default:
+ return false;
+ }
+
+ plan->async_capable = true;
+
+ return true;
+}
+
+/*
+ * create_append_plan
+ * Create an Append plan for 'best_path' and (recursively) plans
+ * for its subpaths.
+ *
+ * Returns a Plan node.
+ */
+static Plan *
+create_append_plan(PlannerInfo *root, AppendPath *best_path, int flags)
+{
+ Append *plan;
+ List *tlist = build_path_tlist(root, &best_path->path);
+ int orig_tlist_length = list_length(tlist);
+ bool tlist_was_changed = false;
+ List *pathkeys = best_path->path.pathkeys;
+ List *subplans = NIL;
+ ListCell *subpaths;
+ int nasyncplans = 0;
+ RelOptInfo *rel = best_path->path.parent;
+ PartitionPruneInfo *partpruneinfo = NULL;
+ int nodenumsortkeys = 0;
+ AttrNumber *nodeSortColIdx = NULL;
+ Oid *nodeSortOperators = NULL;
+ Oid *nodeCollations = NULL;
+ bool *nodeNullsFirst = NULL;
+ bool consider_async = false;
+
+ /*
+ * The subpaths list could be empty, if every child was proven empty by
+ * constraint exclusion. In that case generate a dummy plan that returns
+ * no rows.
+ *
+ * Note that an AppendPath with no members is also generated in certain
+ * cases where there was no appending construct at all, but we know the
+ * relation is empty (see set_dummy_rel_pathlist and mark_dummy_rel).
+ */
+ if (best_path->subpaths == NIL)
+ {
+ /* Generate a Result plan with constant-FALSE gating qual */
+ Plan *plan;
+
+ plan = (Plan *) make_result(tlist,
+ (Node *) list_make1(makeBoolConst(false,
+ false)),
+ NULL);
+
+ copy_generic_path_info(plan, (Path *) best_path);
+
+ return plan;
+ }
+
+ /*
+ * Otherwise build an Append plan. Note that if there's just one child,
+ * the Append is pretty useless; but we wait till setrefs.c to get rid of
+ * it. Doing so here doesn't work because the varno of the child scan
+ * plan won't match the parent-rel Vars it'll be asked to emit.
+ *
+ * We don't have the actual creation of the Append node split out into a
+ * separate make_xxx function. This is because we want to run
+ * prepare_sort_from_pathkeys on it before we do so on the individual
+ * child plans, to make cross-checking the sort info easier.
+ */
+ plan = makeNode(Append);
+ plan->plan.targetlist = tlist;
+ plan->plan.qual = NIL;
+ plan->plan.lefttree = NULL;
+ plan->plan.righttree = NULL;
+ plan->apprelids = rel->relids;
+
+ if (pathkeys != NIL)
+ {
+ /*
+ * Compute sort column info, and adjust the Append's tlist as needed.
+ * Because we pass adjust_tlist_in_place = true, we may ignore the
+ * function result; it must be the same plan node. However, we then
+ * need to detect whether any tlist entries were added.
+ */
+ (void) prepare_sort_from_pathkeys((Plan *) plan, pathkeys,
+ best_path->path.parent->relids,
+ NULL,
+ true,
+ &nodenumsortkeys,
+ &nodeSortColIdx,
+ &nodeSortOperators,
+ &nodeCollations,
+ &nodeNullsFirst);
+ tlist_was_changed = (orig_tlist_length != list_length(plan->plan.targetlist));
+ }
+
+ /* If appropriate, consider async append */
+ consider_async = (enable_async_append && pathkeys == NIL &&
+ !best_path->path.parallel_safe &&
+ list_length(best_path->subpaths) > 1);
+
+ /* Build the plan for each child */
+ foreach(subpaths, best_path->subpaths)
+ {
+ Path *subpath = (Path *) lfirst(subpaths);
+ Plan *subplan;
+
+ /* Must insist that all children return the same tlist */
+ subplan = create_plan_recurse(root, subpath, CP_EXACT_TLIST);
+
+ /*
+ * For ordered Appends, we must insert a Sort node if subplan isn't
+ * sufficiently ordered.
+ */
+ if (pathkeys != NIL)
+ {
+ int numsortkeys;
+ AttrNumber *sortColIdx;
+ Oid *sortOperators;
+ Oid *collations;
+ bool *nullsFirst;
+
+ /*
+ * Compute sort column info, and adjust subplan's tlist as needed.
+ * We must apply prepare_sort_from_pathkeys even to subplans that
+ * don't need an explicit sort, to make sure they are returning
+ * the same sort key columns the Append expects.
+ */
+ subplan = prepare_sort_from_pathkeys(subplan, pathkeys,
+ subpath->parent->relids,
+ nodeSortColIdx,
+ false,
+ &numsortkeys,
+ &sortColIdx,
+ &sortOperators,
+ &collations,
+ &nullsFirst);
+
+ /*
+ * Check that we got the same sort key information. We just
+ * Assert that the sortops match, since those depend only on the
+ * pathkeys; but it seems like a good idea to check the sort
+ * column numbers explicitly, to ensure the tlists match up.
+ */
+ Assert(numsortkeys == nodenumsortkeys);
+ if (memcmp(sortColIdx, nodeSortColIdx,
+ numsortkeys * sizeof(AttrNumber)) != 0)
+ elog(ERROR, "Append child's targetlist doesn't match Append");
+ Assert(memcmp(sortOperators, nodeSortOperators,
+ numsortkeys * sizeof(Oid)) == 0);
+ Assert(memcmp(collations, nodeCollations,
+ numsortkeys * sizeof(Oid)) == 0);
+ Assert(memcmp(nullsFirst, nodeNullsFirst,
+ numsortkeys * sizeof(bool)) == 0);
+
+ /* Now, insert a Sort node if subplan isn't sufficiently ordered */
+ if (!pathkeys_contained_in(pathkeys, subpath->pathkeys))
+ {
+ Sort *sort = make_sort(subplan, numsortkeys,
+ sortColIdx, sortOperators,
+ collations, nullsFirst);
+
+ label_sort_with_costsize(root, sort, best_path->limit_tuples);
+ subplan = (Plan *) sort;
+ }
+ }
+
+ /* If needed, check to see if subplan can be executed asynchronously */
+ if (consider_async && mark_async_capable_plan(subplan, subpath))
+ {
+ Assert(subplan->async_capable);
+ ++nasyncplans;
+ }
+
+ subplans = lappend(subplans, subplan);
+ }
+
+ /*
+ * If any quals exist, they may be useful to perform further partition
+ * pruning during execution. Gather information needed by the executor to
+ * do partition pruning.
+ */
+ if (enable_partition_pruning)
+ {
+ List *prunequal;
+
+ prunequal = extract_actual_clauses(rel->baserestrictinfo, false);
+
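+ /*
+ * If the path is parameterized, its parameterization clauses may be
+ * useful for pruning too; substitute nestloop Params before using them.
+ */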
+ if (best_path->path.param_info)
+ {
+ List *prmquals = best_path->path.param_info->ppi_clauses;
+
+ prmquals = extract_actual_clauses(prmquals, false);
+ prmquals = (List *) replace_nestloop_params(root,
+ (Node *) prmquals);
+
+ prunequal = list_concat(prunequal, prmquals);
+ }
+
+ if (prunequal != NIL)
+ partpruneinfo =
+ make_partition_pruneinfo(root, rel,
+ best_path->subpaths,
+ prunequal);
+ }
+
+ plan->appendplans = subplans;
+ plan->nasyncplans = nasyncplans;
+ plan->first_partial_plan = best_path->first_partial_path;
+ plan->part_prune_info = partpruneinfo;
+
+ copy_generic_path_info(&plan->plan, (Path *) best_path);
+
+ /*
+ * If prepare_sort_from_pathkeys added sort columns, but we were told to
+ * produce either the exact tlist or a narrow tlist, we should get rid of
+ * the sort columns again. We must inject a projection node to do so.
+ */
+ if (tlist_was_changed && (flags & (CP_EXACT_TLIST | CP_SMALL_TLIST)))
+ {
+ tlist = list_truncate(list_copy(plan->plan.targetlist),
+ orig_tlist_length);
+ return inject_projection_plan((Plan *) plan, tlist,
+ plan->plan.parallel_safe);
+ }
+ else
+ return (Plan *) plan;
+}
+
+/*
+ * create_merge_append_plan
+ * Create a MergeAppend plan for 'best_path' and (recursively) plans
+ * for its subpaths.
+ *
+ * Returns a Plan node.
+ */
+static Plan *
+create_merge_append_plan(PlannerInfo *root, MergeAppendPath *best_path,
+ int flags)
+{
+ MergeAppend *node = makeNode(MergeAppend);
+ Plan *plan = &node->plan;
+ List *tlist = build_path_tlist(root, &best_path->path);
+ int orig_tlist_length = list_length(tlist);
+ bool tlist_was_changed;
+ List *pathkeys = best_path->path.pathkeys;
+ List *subplans = NIL;
+ ListCell *subpaths;
+ RelOptInfo *rel = best_path->path.parent;
+ PartitionPruneInfo *partpruneinfo = NULL;
+
+ /*
+ * We don't have the actual creation of the MergeAppend node split out
+ * into a separate make_xxx function. This is because we want to run
+ * prepare_sort_from_pathkeys on it before we do so on the individual
+ * child plans, to make cross-checking the sort info easier.
+ */
+ copy_generic_path_info(plan, (Path *) best_path);
+ plan->targetlist = tlist;
+ plan->qual = NIL;
+ plan->lefttree = NULL;
+ plan->righttree = NULL;
+ node->apprelids = rel->relids;
+
+ /*
+ * Compute sort column info, and adjust MergeAppend's tlist as needed.
+ * Because we pass adjust_tlist_in_place = true, we may ignore the
+ * function result; it must be the same plan node. However, we then need
+ * to detect whether any tlist entries were added.
+ */
+ (void) prepare_sort_from_pathkeys(plan, pathkeys,
+ best_path->path.parent->relids,
+ NULL,
+ true,
+ &node->numCols,
+ &node->sortColIdx,
+ &node->sortOperators,
+ &node->collations,
+ &node->nullsFirst);
+ tlist_was_changed = (orig_tlist_length != list_length(plan->targetlist));
+
+ /*
+ * Now prepare the child plans. We must apply prepare_sort_from_pathkeys
+ * even to subplans that don't need an explicit sort, to make sure they
+ * are returning the same sort key columns the MergeAppend expects.
+ */
+ foreach(subpaths, best_path->subpaths)
+ {
+ Path *subpath = (Path *) lfirst(subpaths);
+ Plan *subplan;
+ int numsortkeys;
+ AttrNumber *sortColIdx;
+ Oid *sortOperators;
+ Oid *collations;
+ bool *nullsFirst;
+
+ /* Build the child plan */
+ /* Must insist that all children return the same tlist */
+ subplan = create_plan_recurse(root, subpath, CP_EXACT_TLIST);
+
+ /* Compute sort column info, and adjust subplan's tlist as needed */
+ subplan = prepare_sort_from_pathkeys(subplan, pathkeys,
+ subpath->parent->relids,
+ node->sortColIdx,
+ false,
+ &numsortkeys,
+ &sortColIdx,
+ &sortOperators,
+ &collations,
+ &nullsFirst);
+
+ /*
+ * Check that we got the same sort key information. We just Assert
+ * that the sortops match, since those depend only on the pathkeys;
+ * but it seems like a good idea to check the sort column numbers
+ * explicitly, to ensure the tlists really do match up.
+ */
+ Assert(numsortkeys == node->numCols);
+ if (memcmp(sortColIdx, node->sortColIdx,
+ numsortkeys * sizeof(AttrNumber)) != 0)
+ elog(ERROR, "MergeAppend child's targetlist doesn't match MergeAppend");
+ Assert(memcmp(sortOperators, node->sortOperators,
+ numsortkeys * sizeof(Oid)) == 0);
+ Assert(memcmp(collations, node->collations,
+ numsortkeys * sizeof(Oid)) == 0);
+ Assert(memcmp(nullsFirst, node->nullsFirst,
+ numsortkeys * sizeof(bool)) == 0);
+
+ /* Now, insert a Sort node if subplan isn't sufficiently ordered */
+ if (!pathkeys_contained_in(pathkeys, subpath->pathkeys))
+ {
+ Sort *sort = make_sort(subplan, numsortkeys,
+ sortColIdx, sortOperators,
+ collations, nullsFirst);
+
+ label_sort_with_costsize(root, sort, best_path->limit_tuples);
+ subplan = (Plan *) sort;
+ }
+
+ subplans = lappend(subplans, subplan);
+ }
+
+ /*
+ * If any quals exist, they may be useful to perform further partition
+ * pruning during execution. Gather information needed by the executor to
+ * do partition pruning.
+ */
+ if (enable_partition_pruning)
+ {
+ List *prunequal;
+
+ prunequal = extract_actual_clauses(rel->baserestrictinfo, false);
+
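+ /* As in create_append_plan, add the parameterization's clauses as well */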
+ if (best_path->path.param_info)
+ {
+ List *prmquals = best_path->path.param_info->ppi_clauses;
+
+ prmquals = extract_actual_clauses(prmquals, false);
+ prmquals = (List *) replace_nestloop_params(root,
+ (Node *) prmquals);
+
+ prunequal = list_concat(prunequal, prmquals);
+ }
+
+ if (prunequal != NIL)
+ partpruneinfo = make_partition_pruneinfo(root, rel,
+ best_path->subpaths,
+ prunequal);
+ }
+
+ node->mergeplans = subplans;
+ node->part_prune_info = partpruneinfo;
+
+ /*
+ * If prepare_sort_from_pathkeys added sort columns, but we were told to
+ * produce either the exact tlist or a narrow tlist, we should get rid of
+ * the sort columns again. We must inject a projection node to do so.
+ */
+ if (tlist_was_changed && (flags & (CP_EXACT_TLIST | CP_SMALL_TLIST)))
+ {
+ tlist = list_truncate(list_copy(plan->targetlist), orig_tlist_length);
+ return inject_projection_plan(plan, tlist, plan->parallel_safe);
+ }
+ else
+ return plan;
+}
+
+/*
+ * create_group_result_plan
+ * Create a Result plan for 'best_path'.
+ * This is only used for degenerate grouping cases.
+ *
+ * Returns a Plan node.
+ */
+static Result *
+create_group_result_plan(PlannerInfo *root, GroupResultPath *best_path)
+{
+ Result *plan;
+ List *tlist;
+ List *quals;
+
+ tlist = build_path_tlist(root, &best_path->path);
+
+ /* best_path->quals is just bare clauses */
+ quals = order_qual_clauses(root, best_path->quals);
+
+ plan = make_result(tlist, (Node *) quals, NULL);
+
+ copy_generic_path_info(&plan->plan, (Path *) best_path);
+
+ return plan;
+}
+
+/*
+ * create_project_set_plan
+ * Create a ProjectSet plan for 'best_path'.
+ *
+ * Returns a Plan node.
+ */
+static ProjectSet *
+create_project_set_plan(PlannerInfo *root, ProjectSetPath *best_path)
+{
+ ProjectSet *plan;
+ Plan *subplan;
+ List *tlist;
+
+ /* Since we intend to project, we don't need to constrain child tlist */
+ subplan = create_plan_recurse(root, best_path->subpath, 0);
+
+ tlist = build_path_tlist(root, &best_path->path);
+
+ plan = make_project_set(tlist, subplan);
+
+ copy_generic_path_info(&plan->plan, (Path *) best_path);
+
+ return plan;
+}
+
+/*
+ * create_material_plan
+ * Create a Material plan for 'best_path' and (recursively) plans
+ * for its subpaths.
+ *
+ * Returns a Plan node.
+ */
+static Material *
+create_material_plan(PlannerInfo *root, MaterialPath *best_path, int flags)
+{
+ Material *plan;
+ Plan *subplan;
+
+ /*
+ * We don't want any excess columns in the materialized tuples, so request
+ * a smaller tlist. Otherwise, since Material doesn't project, tlist
+ * requirements pass through.
+ */
+ subplan = create_plan_recurse(root, best_path->subpath,
+ flags | CP_SMALL_TLIST);
+
+ plan = make_material(subplan);
+
+ copy_generic_path_info(&plan->plan, (Path *) best_path);
+
+ return plan;
+}
+
+/*
+ * create_memoize_plan
+ * Create a Memoize plan for 'best_path' and (recursively) plans for its
+ * subpaths.
+ *
+ * Returns a Plan node.
+ */
+static Memoize *
+create_memoize_plan(PlannerInfo *root, MemoizePath *best_path, int flags)
+{
+ Memoize *plan;
+ Bitmapset *keyparamids;
+ Plan *subplan;
+ Oid *operators;
+ Oid *collations;
+ List *param_exprs = NIL;
+ ListCell *lc;
+ ListCell *lc2;
+ int nkeys;
+ int i;
+
+ subplan = create_plan_recurse(root, best_path->subpath,
+ flags | CP_SMALL_TLIST);
+
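+ /* Replace any outer-relation Vars in the cache keys with nestloop Params */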
+ param_exprs = (List *) replace_nestloop_params(root, (Node *)
+ best_path->param_exprs);
+
+ nkeys = list_length(param_exprs);
+ Assert(nkeys > 0);
+ operators = palloc(nkeys * sizeof(Oid));
+ collations = palloc(nkeys * sizeof(Oid));
+
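+ /* Collect the hash equality operator and collation for each cache key */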
+ i = 0;
+ forboth(lc, param_exprs, lc2, best_path->hash_operators)
+ {
+ Expr *param_expr = (Expr *) lfirst(lc);
+ Oid opno = lfirst_oid(lc2);
+
+ operators[i] = opno;
+ collations[i] = exprCollation((Node *) param_expr);
+ i++;
+ }
+
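+ /* Find the exec Params that the cache key expressions depend on */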
+ keyparamids = pull_paramids((Expr *) param_exprs);
+
+ plan = make_memoize(subplan, operators, collations, param_exprs,
+ best_path->singlerow, best_path->binary_mode,
+ best_path->est_entries, keyparamids);
+
+ copy_generic_path_info(&plan->plan, (Path *) best_path);
+
+ return plan;
+}
+
+/*
+ * create_unique_plan
+ * Create a Unique plan for 'best_path' and (recursively) plans
+ * for its subpaths.
+ *
+ * Returns a Plan node.
+ */
+static Plan *
+create_unique_plan(PlannerInfo *root, UniquePath *best_path, int flags)
+{
+ Plan *plan;
+ Plan *subplan;
+ List *in_operators;
+ List *uniq_exprs;
+ List *newtlist;
+ int nextresno;
+ bool newitems;
+ int numGroupCols;
+ AttrNumber *groupColIdx;
+ Oid *groupCollations;
+ int groupColPos;
+ ListCell *l;
+
+ /* Unique doesn't project, so tlist requirements pass through */
+ subplan = create_plan_recurse(root, best_path->subpath, flags);
+
+ /* Done if we don't need to do any actual unique-ifying */
+ if (best_path->umethod == UNIQUE_PATH_NOOP)
+ return subplan;
+
+ /*
+ * As constructed, the subplan has a "flat" tlist containing just the Vars
+ * needed here and at upper levels. The values we are supposed to
+ * unique-ify may be expressions in these variables. We have to add any
+ * such expressions to the subplan's tlist.
+ *
+ * The subplan may have a "physical" tlist if it is a simple scan plan. If
+ * we're going to sort, this should be reduced to the regular tlist, so
+ * that we don't sort more data than we need to. For hashing, the tlist
+ * should be left as-is if we don't need to add any expressions; but if we
+ * do have to add expressions, then a projection step will be needed at
+ * runtime anyway, so we may as well remove unneeded items. Therefore
+ * newtlist starts from build_path_tlist() not just a copy of the
+ * subplan's tlist; and we don't install it into the subplan unless we are
+ * sorting or stuff has to be added.
+ */
+ in_operators = best_path->in_operators;
+ uniq_exprs = best_path->uniq_exprs;
+
+ /* initialize modified subplan tlist as just the "required" vars */
+ newtlist = build_path_tlist(root, &best_path->path);
+ nextresno = list_length(newtlist) + 1;
+ newitems = false;
+
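+ /* Add any unique-ification expressions not already present in the tlist */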
+ foreach(l, uniq_exprs)
+ {
+ Expr *uniqexpr = lfirst(l);
+ TargetEntry *tle;
+
+ tle = tlist_member(uniqexpr, newtlist);
+ if (!tle)
+ {
+ tle = makeTargetEntry((Expr *) uniqexpr,
+ nextresno,
+ NULL,
+ false);
+ newtlist = lappend(newtlist, tle);
+ nextresno++;
+ newitems = true;
+ }
+ }
+
+ /* Use change_plan_targetlist in case we need to insert a Result node */
+ if (newitems || best_path->umethod == UNIQUE_PATH_SORT)
+ subplan = change_plan_targetlist(subplan, newtlist,
+ best_path->path.parallel_safe);
+
+ /*
+ * Build control information showing which subplan output columns are to
+ * be examined by the grouping step. Unfortunately we can't merge this
+ * with the previous loop, since we didn't then know which version of the
+ * subplan tlist we'd end up using.
+ */
+ newtlist = subplan->targetlist;
+ numGroupCols = list_length(uniq_exprs);
+ groupColIdx = (AttrNumber *) palloc(numGroupCols * sizeof(AttrNumber));
+ groupCollations = (Oid *) palloc(numGroupCols * sizeof(Oid));
+
+ groupColPos = 0;
+ foreach(l, uniq_exprs)
+ {
+ Expr *uniqexpr = lfirst(l);
+ TargetEntry *tle;
+
+ tle = tlist_member(uniqexpr, newtlist);
+ if (!tle) /* shouldn't happen */
+ elog(ERROR, "failed to find unique expression in subplan tlist");
+ groupColIdx[groupColPos] = tle->resno;
+ groupCollations[groupColPos] = exprCollation((Node *) tle->expr);
+ groupColPos++;
+ }
+
+ if (best_path->umethod == UNIQUE_PATH_HASH)
+ {
+ Oid *groupOperators;
+
+ /*
+ * Get the hashable equality operators for the Agg node to use.
+ * Normally these are the same as the IN clause operators, but if
+ * those are cross-type operators then the equality operators are the
+ * ones for the IN clause operators' RHS datatype.
+ */
+ groupOperators = (Oid *) palloc(numGroupCols * sizeof(Oid));
+ groupColPos = 0;
+ foreach(l, in_operators)
+ {
+ Oid in_oper = lfirst_oid(l);
+ Oid eq_oper;
+
+ if (!get_compatible_hash_operators(in_oper, NULL, &eq_oper))
+ elog(ERROR, "could not find compatible hash operator for operator %u",
+ in_oper);
+ groupOperators[groupColPos++] = eq_oper;
+ }
+
+ /*
+ * Since the Agg node is going to project anyway, we can give it the
+ * minimum output tlist, without any stuff we might have added to the
+ * subplan tlist.
+ */
+ plan = (Plan *) make_agg(build_path_tlist(root, &best_path->path),
+ NIL,
+ AGG_HASHED,
+ AGGSPLIT_SIMPLE,
+ numGroupCols,
+ groupColIdx,
+ groupOperators,
+ groupCollations,
+ NIL,
+ NIL,
+ best_path->path.rows,
+ 0,
+ subplan);
+ }
+ else
+ {
+ List *sortList = NIL;
+ Sort *sort;
+
+ /* Create an ORDER BY list to sort the input compatibly */
+ groupColPos = 0;
+ foreach(l, in_operators)
+ {
+ Oid in_oper = lfirst_oid(l);
+ Oid sortop;
+ Oid eqop;
+ TargetEntry *tle;
+ SortGroupClause *sortcl;
+
+ sortop = get_ordering_op_for_equality_op(in_oper, false);
+ if (!OidIsValid(sortop)) /* shouldn't happen */
+ elog(ERROR, "could not find ordering operator for equality operator %u",
+ in_oper);
+
+ /*
+ * The Unique node will need equality operators. Normally these
+ * are the same as the IN clause operators, but if those are
+ * cross-type operators then the equality operators are the ones
+ * for the IN clause operators' RHS datatype.
+ */
+ eqop = get_equality_op_for_ordering_op(sortop, NULL);
+ if (!OidIsValid(eqop)) /* shouldn't happen */
+ elog(ERROR, "could not find equality operator for ordering operator %u",
+ sortop);
+
+ tle = get_tle_by_resno(subplan->targetlist,
+ groupColIdx[groupColPos]);
+ Assert(tle != NULL);
+
+ sortcl = makeNode(SortGroupClause);
+ sortcl->tleSortGroupRef = assignSortGroupRef(tle,
+ subplan->targetlist);
+ sortcl->eqop = eqop;
+ sortcl->sortop = sortop;
+ sortcl->nulls_first = false;
+ sortcl->hashable = false; /* no need to make this accurate */
+ sortList = lappend(sortList, sortcl);
+ groupColPos++;
+ }
+ sort = make_sort_from_sortclauses(sortList, subplan);
+ label_sort_with_costsize(root, sort, -1.0);
+ plan = (Plan *) make_unique_from_sortclauses((Plan *) sort, sortList);
+ }
+
+ /* Copy cost data from Path to Plan */
+ copy_generic_path_info(plan, &best_path->path);
+
+ return plan;
+}
+
+/*
+ * create_gather_plan
+ *
+ * Create a Gather plan for 'best_path' and (recursively) plans
+ * for its subpaths.
+ */
+static Gather *
+create_gather_plan(PlannerInfo *root, GatherPath *best_path)
+{
+ Gather *gather_plan;
+ Plan *subplan;
+ List *tlist;
+
+ /*
+ * Push projection down to the child node. That way, the projection work
+ * is parallelized, and there can be no system columns in the result (they
+ * can't travel through a tuple queue because it uses MinimalTuple
+ * representation).
+ */
+ subplan = create_plan_recurse(root, best_path->subpath, CP_EXACT_TLIST);
+
+ tlist = build_path_tlist(root, &best_path->path);
+
+ gather_plan = make_gather(tlist,
+ NIL,
+ best_path->num_workers,
+ assign_special_exec_param(root),
+ best_path->single_copy,
+ subplan);
+
+ copy_generic_path_info(&gather_plan->plan, &best_path->path);
+
+ /* use parallel mode for parallel plans. */
+ root->glob->parallelModeNeeded = true;
+
+ return gather_plan;
+}
+
+/*
+ * create_gather_merge_plan
+ *
+ * Create a Gather Merge plan for 'best_path' and (recursively)
+ * plans for its subpaths.
+ */
+static GatherMerge *
+create_gather_merge_plan(PlannerInfo *root, GatherMergePath *best_path)
+{
+ GatherMerge *gm_plan;
+ Plan *subplan;
+ List *pathkeys = best_path->path.pathkeys;
+ List *tlist = build_path_tlist(root, &best_path->path);
+
+ /* As with Gather, project away columns in the workers. */
+ subplan = create_plan_recurse(root, best_path->subpath, CP_EXACT_TLIST);
+
+ /* Create a shell for a GatherMerge plan. */
+ gm_plan = makeNode(GatherMerge);
+ gm_plan->plan.targetlist = tlist;
+ gm_plan->num_workers = best_path->num_workers;
+ copy_generic_path_info(&gm_plan->plan, &best_path->path);
+
+ /* Assign the rescan Param. */
+ gm_plan->rescan_param = assign_special_exec_param(root);
+
+ /* Gather Merge is pointless with no pathkeys; use Gather instead. */
+ Assert(pathkeys != NIL);
+
+ /* Compute sort column info, and adjust subplan's tlist as needed */
+ subplan = prepare_sort_from_pathkeys(subplan, pathkeys,
+ best_path->subpath->parent->relids,
+ gm_plan->sortColIdx,
+ false,
+ &gm_plan->numCols,
+ &gm_plan->sortColIdx,
+ &gm_plan->sortOperators,
+ &gm_plan->collations,
+ &gm_plan->nullsFirst);
+
+ /*
+ * All gather merge paths should have already guaranteed the necessary
+ * sort order either by adding an explicit sort node or by using presorted
+ * input. We can't simply add a sort here on additional pathkeys, because
+ * we can't guarantee the sort would be safe. For example, expressions may
+ * be volatile or otherwise parallel unsafe.
+ */
+ if (!pathkeys_contained_in(pathkeys, best_path->subpath->pathkeys))
+ elog(ERROR, "gather merge input not sufficiently sorted");
+
+ /* Now insert the subplan under GatherMerge. */
+ gm_plan->plan.lefttree = subplan;
+
+ /* use parallel mode for parallel plans. */
+ root->glob->parallelModeNeeded = true;
+
+ return gm_plan;
+}
+
+/*
+ * create_projection_plan
+ *
+ * Create a plan tree to do a projection step and (recursively) plans
+ * for its subpaths. We may need a Result node for the projection,
+ * but sometimes we can just let the subplan do the work.
+ */
+static Plan *
+create_projection_plan(PlannerInfo *root, ProjectionPath *best_path, int flags)
+{
+ Plan *plan;
+ Plan *subplan;
+ List *tlist;
+ bool needs_result_node = false;
+
+ /*
+ * Convert our subpath to a Plan and determine whether we need a Result
+ * node.
+ *
+ * In most cases where we don't need to project, create_projection_path
+ * will have set dummypp, but not always. First, some createplan.c
+ * routines change the tlists of their nodes. (An example is that
+ * create_merge_append_plan might add resjunk sort columns to a
+ * MergeAppend.) Second, create_projection_path has no way of knowing
+ * what path node will be placed on top of the projection path and
+ * therefore can't predict whether it will require an exact tlist. For
+ * both of these reasons, we have to recheck here.
+ */
+ if (use_physical_tlist(root, &best_path->path, flags))
+ {
+ /*
+ * Our caller doesn't really care what tlist we return, so we don't
+ * actually need to project. However, we may still need to ensure
+ * proper sortgroupref labels, if the caller cares about those.
+ */
+ subplan = create_plan_recurse(root, best_path->subpath, 0);
+ tlist = subplan->targetlist;
+ if (flags & CP_LABEL_TLIST)
+ apply_pathtarget_labeling_to_tlist(tlist,
+ best_path->path.pathtarget);
+ }
+ else if (is_projection_capable_path(best_path->subpath))
+ {
+ /*
+ * Our caller requires that we return the exact tlist, but no separate
+ * result node is needed because the subpath is projection-capable.
+ * Tell create_plan_recurse that we're going to ignore the tlist it
+ * produces.
+ */
+ subplan = create_plan_recurse(root, best_path->subpath,
+ CP_IGNORE_TLIST);
+ Assert(is_projection_capable_plan(subplan));
+ tlist = build_path_tlist(root, &best_path->path);
+ }
+ else
+ {
+ /*
+ * It looks like we need a result node, unless by good fortune the
+ * requested tlist is exactly the one the child wants to produce.
+ */
+ subplan = create_plan_recurse(root, best_path->subpath, 0);
+ tlist = build_path_tlist(root, &best_path->path);
+ needs_result_node = !tlist_same_exprs(tlist, subplan->targetlist);
+ }
+
+ /*
+ * If we make a different decision about whether to include a Result node
+ * than create_projection_path did, we'll have made slightly wrong cost
+ * estimates; but label the plan with the cost estimates we actually used,
+ * not "corrected" ones. (XXX this could be cleaned up if we moved more
+ * of the sortcolumn setup logic into Path creation, but that would add
+ * expense to creating Paths we might end up not using.)
+ */
+ if (!needs_result_node)
+ {
+ /* Don't need a separate Result, just assign tlist to subplan */
+ plan = subplan;
+ plan->targetlist = tlist;
+
+ /* Label plan with the estimated costs we actually used */
+ plan->startup_cost = best_path->path.startup_cost;
+ plan->total_cost = best_path->path.total_cost;
+ plan->plan_rows = best_path->path.rows;
+ plan->plan_width = best_path->path.pathtarget->width;
+ plan->parallel_safe = best_path->path.parallel_safe;
+ /* ... but don't change subplan's parallel_aware flag */
+ }
+ else
+ {
+ /* We need a Result node */
+ plan = (Plan *) make_result(tlist, NULL, subplan);
+
+ copy_generic_path_info(plan, (Path *) best_path);
+ }
+
+ return plan;
+}
+
+/*
+ * inject_projection_plan
+ * Insert a Result node to do a projection step.
+ *
+ * This is used in a few places where we decide on-the-fly that we need a
+ * projection step as part of the tree generated for some Path node.
+ * We should try to get rid of this in favor of doing it more honestly.
+ *
+ * One reason it's ugly is we have to be told the right parallel_safe marking
+ * to apply (since the tlist might be unsafe even if the child plan is safe).
+ */
+static Plan *
+inject_projection_plan(Plan *subplan, List *tlist, bool parallel_safe)
+{
+ Plan *plan;
+
+ plan = (Plan *) make_result(tlist, NULL, subplan);
+
+ /*
+ * In principle, we should charge tlist eval cost plus cpu_per_tuple per
+ * row for the Result node. But the former has probably been factored in
+ * already and the latter was not accounted for during Path construction,
+ * so being formally correct might just make the EXPLAIN output look less
+ * consistent, not more so. Hence, just copy the subplan's cost.
+ */
+ copy_plan_costsize(plan, subplan);
+ plan->parallel_safe = parallel_safe;
+
+ return plan;
+}
+
+/*
+ * change_plan_targetlist
+ * Externally available wrapper for inject_projection_plan.
+ *
+ * This is meant for use by FDW plan-generation functions, which might
+ * want to adjust the tlist computed by some subplan tree. In general,
+ * a Result node is needed to compute the new tlist, but we can optimize
+ * some cases.
+ *
+ * In most cases, tlist_parallel_safe can just be passed as the parallel_safe
+ * flag of the FDW's own Path node.
+ */
+Plan *
+change_plan_targetlist(Plan *subplan, List *tlist, bool tlist_parallel_safe)
+{
+ /*
+ * If the top plan node can't do projections and its existing target list
+ * isn't already what we need, we need to add a Result node to help it
+ * along.
+ */
+ if (!is_projection_capable_plan(subplan) &&
+ !tlist_same_exprs(tlist, subplan->targetlist))
+ subplan = inject_projection_plan(subplan, tlist,
+ subplan->parallel_safe &&
+ tlist_parallel_safe);
+ else
+ {
+ /* Else we can just replace the plan node's tlist */
+ subplan->targetlist = tlist;
+ subplan->parallel_safe &= tlist_parallel_safe;
+ }
+ return subplan;
+}
+
+/*
+ * create_sort_plan
+ *
+ * Create a Sort plan for 'best_path' and (recursively) plans
+ * for its subpaths.
+ */
+static Sort *
+create_sort_plan(PlannerInfo *root, SortPath *best_path, int flags)
+{
+ Sort *plan;
+ Plan *subplan;
+
+ /*
+ * We don't want any excess columns in the sorted tuples, so request a
+ * smaller tlist. Otherwise, since Sort doesn't project, tlist
+ * requirements pass through.
+ */
+ subplan = create_plan_recurse(root, best_path->subpath,
+ flags | CP_SMALL_TLIST);
+
+ /*
+ * make_sort_from_pathkeys indirectly calls find_ec_member_matching_expr,
+ * which will ignore any child EC members that don't belong to the given
+ * relids. Thus, if this sort path is based on a child relation, we must
+ * pass its relids.
+ */
+ plan = make_sort_from_pathkeys(subplan, best_path->path.pathkeys,
+ IS_OTHER_REL(best_path->subpath->parent) ?
+ best_path->path.parent->relids : NULL);
+
+ copy_generic_path_info(&plan->plan, (Path *) best_path);
+
+ return plan;
+}
+
+/*
+ * create_incrementalsort_plan
+ *
+ * Do the same as create_sort_plan, but create IncrementalSort plan.
+ */
+static IncrementalSort *
+create_incrementalsort_plan(PlannerInfo *root, IncrementalSortPath *best_path,
+ int flags)
+{
+ IncrementalSort *plan;
+ Plan *subplan;
+
+ /* See comments in create_sort_plan() above */
+ subplan = create_plan_recurse(root, best_path->spath.subpath,
+ flags | CP_SMALL_TLIST);
+ plan = make_incrementalsort_from_pathkeys(subplan,
+ best_path->spath.path.pathkeys,
+ IS_OTHER_REL(best_path->spath.subpath->parent) ?
+ best_path->spath.path.parent->relids : NULL,
+ best_path->nPresortedCols);
+
+ copy_generic_path_info(&plan->sort.plan, (Path *) best_path);
+
+ return plan;
+}
+
+/*
+ * create_group_plan
+ *
+ * Create a Group plan for 'best_path' and (recursively) plans
+ * for its subpaths.
+ */
+static Group *
+create_group_plan(PlannerInfo *root, GroupPath *best_path)
+{
+ Group *plan;
+ Plan *subplan;
+ List *tlist;
+ List *quals;
+
+ /*
+ * Group can project, so no need to be terribly picky about child tlist,
+ * but we do need grouping columns to be available
+ */
+ subplan = create_plan_recurse(root, best_path->subpath, CP_LABEL_TLIST);
+
+ tlist = build_path_tlist(root, &best_path->path);
+
+ quals = order_qual_clauses(root, best_path->qual);
+
+ plan = make_group(tlist,
+ quals,
+ list_length(best_path->groupClause),
+ extract_grouping_cols(best_path->groupClause,
+ subplan->targetlist),
+ extract_grouping_ops(best_path->groupClause),
+ extract_grouping_collations(best_path->groupClause,
+ subplan->targetlist),
+ subplan);
+
+ copy_generic_path_info(&plan->plan, (Path *) best_path);
+
+ return plan;
+}
+
+/*
+ * create_upper_unique_plan
+ *
+ * Create a Unique plan for 'best_path' and (recursively) plans
+ * for its subpaths.
+ */
+static Unique *
+create_upper_unique_plan(PlannerInfo *root, UpperUniquePath *best_path, int flags)
+{
+ Unique *plan;
+ Plan *subplan;
+
+ /*
+ * Unique doesn't project, so tlist requirements pass through; moreover we
+ * need grouping columns to be labeled.
+ */
+ subplan = create_plan_recurse(root, best_path->subpath,
+ flags | CP_LABEL_TLIST);
+
+ plan = make_unique_from_pathkeys(subplan,
+ best_path->path.pathkeys,
+ best_path->numkeys);
+
+ copy_generic_path_info(&plan->plan, (Path *) best_path);
+
+ return plan;
+}
+
+/*
+ * create_agg_plan
+ *
+ * Create an Agg plan for 'best_path' and (recursively) plans
+ * for its subpaths.
+ */
+static Agg *
+create_agg_plan(PlannerInfo *root, AggPath *best_path)
+{
+ Agg *plan;
+ Plan *subplan;
+ List *tlist;
+ List *quals;
+
+ /*
+ * Agg can project, so no need to be terribly picky about child tlist, but
+ * we do need grouping columns to be available
+ */
+ subplan = create_plan_recurse(root, best_path->subpath, CP_LABEL_TLIST);
+
+ tlist = build_path_tlist(root, &best_path->path);
+
+ quals = order_qual_clauses(root, best_path->qual);
+
+ plan = make_agg(tlist, quals,
+ best_path->aggstrategy,
+ best_path->aggsplit,
+ list_length(best_path->groupClause),
+ extract_grouping_cols(best_path->groupClause,
+ subplan->targetlist),
+ extract_grouping_ops(best_path->groupClause),
+ extract_grouping_collations(best_path->groupClause,
+ subplan->targetlist),
+ NIL,
+ NIL,
+ best_path->numGroups,
+ best_path->transitionSpace,
+ subplan);
+
+ copy_generic_path_info(&plan->plan, (Path *) best_path);
+
+ return plan;
+}
+
+/*
+ * Given a groupclause for a collection of grouping sets, produce the
+ * corresponding groupColIdx.
+ *
+ * root->grouping_map maps the tleSortGroupRef to the actual column position in
+ * the input tuple. So we get the ref from the entries in the groupclause and
+ * look them up there.
+ */
+static AttrNumber *
+remap_groupColIdx(PlannerInfo *root, List *groupClause)
+{
+ AttrNumber *grouping_map = root->grouping_map;
+ AttrNumber *new_grpColIdx;
+ ListCell *lc;
+ int i;
+
+ Assert(grouping_map);
+
+ new_grpColIdx = palloc0(sizeof(AttrNumber) * list_length(groupClause));
+
+ i = 0;
+ foreach(lc, groupClause)
+ {
+ SortGroupClause *clause = lfirst(lc);
+
+ new_grpColIdx[i++] = grouping_map[clause->tleSortGroupRef];
+ }
+
+ return new_grpColIdx;
+}
+
+/*
+ * create_groupingsets_plan
+ * Create a plan for 'best_path' and (recursively) plans
+ * for its subpaths.
+ *
+ * What we emit is an Agg plan with some vestigial Agg and Sort nodes
+ * hanging off the side. The top Agg implements the last grouping set
+ * specified in the GroupingSetsPath, and any additional grouping sets
+ * each give rise to a subsidiary Agg and Sort node in the top Agg's
+ * "chain" list. These nodes don't participate in the plan directly,
+ * but they are a convenient way to represent the required data for
+ * the extra steps.
+ *
+ * Returns a Plan node.
+ */
+static Plan *
+create_groupingsets_plan(PlannerInfo *root, GroupingSetsPath *best_path)
+{
+ Agg *plan;
+ Plan *subplan;
+ List *rollups = best_path->rollups;
+ AttrNumber *grouping_map;
+ int maxref;
+ List *chain;
+ ListCell *lc;
+
+ /* Shouldn't get here without grouping sets */
+ Assert(root->parse->groupingSets);
+ Assert(rollups != NIL);
+
+ /*
+ * Agg can project, so no need to be terribly picky about child tlist, but
+ * we do need grouping columns to be available
+ */
+ subplan = create_plan_recurse(root, best_path->subpath, CP_LABEL_TLIST);
+
+ /*
+ * Compute the mapping from tleSortGroupRef to column index in the child's
+ * tlist. First, identify max SortGroupRef in groupClause, for array
+ * sizing.
+ */
+ maxref = 0;
+ foreach(lc, root->parse->groupClause)
+ {
+ SortGroupClause *gc = (SortGroupClause *) lfirst(lc);
+
+ if (gc->tleSortGroupRef > maxref)
+ maxref = gc->tleSortGroupRef;
+ }
+
+ grouping_map = (AttrNumber *) palloc0((maxref + 1) * sizeof(AttrNumber));
+
+ /* Now look up the column numbers in the child's tlist */
+ foreach(lc, root->parse->groupClause)
+ {
+ SortGroupClause *gc = (SortGroupClause *) lfirst(lc);
+ TargetEntry *tle = get_sortgroupclause_tle(gc, subplan->targetlist);
+
+ grouping_map[gc->tleSortGroupRef] = tle->resno;
+ }
+
+ /*
+ * During setrefs.c, we'll need the grouping_map to fix up the cols lists
+ * in GroupingFunc nodes. Save it for setrefs.c to use.
+ */
+ Assert(root->grouping_map == NULL);
+ root->grouping_map = grouping_map;
+
+ /*
+ * Generate the side nodes that describe the other sort and group
+ * operations besides the top one. Note that we don't worry about putting
+ * accurate cost estimates in the side nodes; only the topmost Agg node's
+ * costs will be shown by EXPLAIN.
+ */
+ chain = NIL;
+ if (list_length(rollups) > 1)
+ {
+ bool is_first_sort = ((RollupData *) linitial(rollups))->is_hashed;
+
+ for_each_from(lc, rollups, 1)
+ {
+ RollupData *rollup = lfirst(lc);
+ AttrNumber *new_grpColIdx;
+ Plan *sort_plan = NULL;
+ Plan *agg_plan;
+ AggStrategy strat;
+
+ new_grpColIdx = remap_groupColIdx(root, rollup->groupClause);
+
+ if (!rollup->is_hashed && !is_first_sort)
+ {
+ sort_plan = (Plan *)
+ make_sort_from_groupcols(rollup->groupClause,
+ new_grpColIdx,
+ subplan);
+ }
+
+ if (!rollup->is_hashed)
+ is_first_sort = false;
+
+ if (rollup->is_hashed)
+ strat = AGG_HASHED;
+ else if (list_length(linitial(rollup->gsets)) == 0)
+ strat = AGG_PLAIN;
+ else
+ strat = AGG_SORTED;
+
+ agg_plan = (Plan *) make_agg(NIL,
+ NIL,
+ strat,
+ AGGSPLIT_SIMPLE,
+ list_length((List *) linitial(rollup->gsets)),
+ new_grpColIdx,
+ extract_grouping_ops(rollup->groupClause),
+ extract_grouping_collations(rollup->groupClause, subplan->targetlist),
+ rollup->gsets,
+ NIL,
+ rollup->numGroups,
+ best_path->transitionSpace,
+ sort_plan);
+
+ /*
+ * Remove stuff we don't need, to avoid bloating debug output.
+ */
+ if (sort_plan)
+ {
+ sort_plan->targetlist = NIL;
+ sort_plan->lefttree = NULL;
+ }
+
+ chain = lappend(chain, agg_plan);
+ }
+ }
+
+ /*
+ * Now make the real Agg node
+ */
+ {
+ RollupData *rollup = linitial(rollups);
+ AttrNumber *top_grpColIdx;
+ int numGroupCols;
+
+ top_grpColIdx = remap_groupColIdx(root, rollup->groupClause);
+
+ numGroupCols = list_length((List *) linitial(rollup->gsets));
+
+ plan = make_agg(build_path_tlist(root, &best_path->path),
+ best_path->qual,
+ best_path->aggstrategy,
+ AGGSPLIT_SIMPLE,
+ numGroupCols,
+ top_grpColIdx,
+ extract_grouping_ops(rollup->groupClause),
+ extract_grouping_collations(rollup->groupClause, subplan->targetlist),
+ rollup->gsets,
+ chain,
+ rollup->numGroups,
+ best_path->transitionSpace,
+ subplan);
+
+ /* Copy cost data from Path to Plan */
+ copy_generic_path_info(&plan->plan, &best_path->path);
+ }
+
+ return (Plan *) plan;
+}
+
+/*
+ * create_minmaxagg_plan
+ *
+ * Create a Result plan for 'best_path' and (recursively) plans
+ * for its subpaths.
+ */
+static Result *
+create_minmaxagg_plan(PlannerInfo *root, MinMaxAggPath *best_path)
+{
+ Result *plan;
+ List *tlist;
+ ListCell *lc;
+
+ /* Prepare an InitPlan for each aggregate's subquery. */
+ foreach(lc, best_path->mmaggregates)
+ {
+ MinMaxAggInfo *mminfo = (MinMaxAggInfo *) lfirst(lc);
+ PlannerInfo *subroot = mminfo->subroot;
+ Query *subparse = subroot->parse;
+ Plan *plan;
+
+ /*
+ * Generate the plan for the subquery. We already have a Path, but we
+ * have to convert it to a Plan and attach a LIMIT node above it.
+ * Since we are entering a different planner context (subroot),
+ * recurse to create_plan not create_plan_recurse.
+ */
+ plan = create_plan(subroot, mminfo->path);
+
+ plan = (Plan *) make_limit(plan,
+ subparse->limitOffset,
+ subparse->limitCount,
+ subparse->limitOption,
+ 0, NULL, NULL, NULL);
+
+ /* Must apply correct cost/width data to Limit node */
+ plan->startup_cost = mminfo->path->startup_cost;
+ plan->total_cost = mminfo->pathcost;
+ plan->plan_rows = 1;
+ plan->plan_width = mminfo->path->pathtarget->width;
+ plan->parallel_aware = false;
+ plan->parallel_safe = mminfo->path->parallel_safe;
+
+ /* Convert the plan into an InitPlan in the outer query. */
+ SS_make_initplan_from_plan(root, subroot, plan, mminfo->param);
+ }
+
+ /* Generate the output plan --- basically just a Result */
+ tlist = build_path_tlist(root, &best_path->path);
+
+ plan = make_result(tlist, (Node *) best_path->quals, NULL);
+
+ copy_generic_path_info(&plan->plan, (Path *) best_path);
+
+ /*
+ * During setrefs.c, we'll need to replace references to the Agg nodes
+ * with InitPlan output params. (We can't just do that locally in the
+ * MinMaxAgg node, because path nodes above here may have Agg references
+ * as well.) Save the mmaggregates list to tell setrefs.c to do that.
+ */
+ Assert(root->minmax_aggs == NIL);
+ root->minmax_aggs = best_path->mmaggregates;
+
+ return plan;
+}
+
+/*
+ * create_windowagg_plan
+ *
+ * Create a WindowAgg plan for 'best_path' and (recursively) plans
+ * for its subpaths.
+ */
+static WindowAgg *
+create_windowagg_plan(PlannerInfo *root, WindowAggPath *best_path)
+{
+ WindowAgg *plan;
+ WindowClause *wc = best_path->winclause;
+ int numPart = list_length(wc->partitionClause);
+ int numOrder = list_length(wc->orderClause);
+ Plan *subplan;
+ List *tlist;
+ int partNumCols;
+ AttrNumber *partColIdx;
+ Oid *partOperators;
+ Oid *partCollations;
+ int ordNumCols;
+ AttrNumber *ordColIdx;
+ Oid *ordOperators;
+ Oid *ordCollations;
+ ListCell *lc;
+
+ /*
+ * Choice of tlist here is motivated by the fact that WindowAgg will be
+ * storing the input rows of window frames in a tuplestore; it therefore
+ * behooves us to request a small tlist to avoid wasting space. We do of
+ * course need grouping columns to be available.
+ */
+ subplan = create_plan_recurse(root, best_path->subpath,
+ CP_LABEL_TLIST | CP_SMALL_TLIST);
+
+ tlist = build_path_tlist(root, &best_path->path);
+
+ /*
+ * Convert SortGroupClause lists into arrays of attr indexes and equality
+ * operators, as wanted by executor. (Note: in principle, it's possible
+ * to drop some of the sort columns, if they were proved redundant by
+ * pathkey logic. However, it doesn't seem worth going out of our way to
+ * optimize such cases. In any case, we must *not* remove the ordering
+ * column for RANGE OFFSET cases, as the executor needs that for in_range
+ * tests even if it's known to be equal to some partitioning column.)
+ */
+ partColIdx = (AttrNumber *) palloc(sizeof(AttrNumber) * numPart);
+ partOperators = (Oid *) palloc(sizeof(Oid) * numPart);
+ partCollations = (Oid *) palloc(sizeof(Oid) * numPart);
+
+ partNumCols = 0;
+ foreach(lc, wc->partitionClause)
+ {
+ SortGroupClause *sgc = (SortGroupClause *) lfirst(lc);
+ TargetEntry *tle = get_sortgroupclause_tle(sgc, subplan->targetlist);
+
+ Assert(OidIsValid(sgc->eqop));
+ partColIdx[partNumCols] = tle->resno;
+ partOperators[partNumCols] = sgc->eqop;
+ partCollations[partNumCols] = exprCollation((Node *) tle->expr);
+ partNumCols++;
+ }
+
+ ordColIdx = (AttrNumber *) palloc(sizeof(AttrNumber) * numOrder);
+ ordOperators = (Oid *) palloc(sizeof(Oid) * numOrder);
+ ordCollations = (Oid *) palloc(sizeof(Oid) * numOrder);
+
+ ordNumCols = 0;
+ foreach(lc, wc->orderClause)
+ {
+ SortGroupClause *sgc = (SortGroupClause *) lfirst(lc);
+ TargetEntry *tle = get_sortgroupclause_tle(sgc, subplan->targetlist);
+
+ Assert(OidIsValid(sgc->eqop));
+ ordColIdx[ordNumCols] = tle->resno;
+ ordOperators[ordNumCols] = sgc->eqop;
+ ordCollations[ordNumCols] = exprCollation((Node *) tle->expr);
+ ordNumCols++;
+ }
+
+ /* And finally we can make the WindowAgg node */
+ plan = make_windowagg(tlist,
+ wc->winref,
+ partNumCols,
+ partColIdx,
+ partOperators,
+ partCollations,
+ ordNumCols,
+ ordColIdx,
+ ordOperators,
+ ordCollations,
+ wc->frameOptions,
+ wc->startOffset,
+ wc->endOffset,
+ wc->startInRangeFunc,
+ wc->endInRangeFunc,
+ wc->inRangeColl,
+ wc->inRangeAsc,
+ wc->inRangeNullsFirst,
+ wc->runCondition,
+ best_path->qual,
+ best_path->topwindow,
+ subplan);
+
+ copy_generic_path_info(&plan->plan, (Path *) best_path);
+
+ return plan;
+}
+
+/*
+ * create_setop_plan
+ *
+ * Create a SetOp plan for 'best_path' and (recursively) plans
+ * for its subpaths.
+ */
+static SetOp *
+create_setop_plan(PlannerInfo *root, SetOpPath *best_path, int flags)
+{
+ SetOp *plan;
+ Plan *subplan;
+ long numGroups;
+
+ /*
+ * SetOp doesn't project, so tlist requirements pass through; moreover we
+ * need grouping columns to be labeled.
+ */
+ subplan = create_plan_recurse(root, best_path->subpath,
+ flags | CP_LABEL_TLIST);
+
+ /* Convert numGroups to long int --- but 'ware overflow! */
+ numGroups = clamp_cardinality_to_long(best_path->numGroups);
+
+ plan = make_setop(best_path->cmd,
+ best_path->strategy,
+ subplan,
+ best_path->distinctList,
+ best_path->flagColIdx,
+ best_path->firstFlag,
+ numGroups);
+
+ copy_generic_path_info(&plan->plan, (Path *) best_path);
+
+ return plan;
+}
+
+/*
+ * create_recursiveunion_plan
+ *
+ * Create a RecursiveUnion plan for 'best_path' and (recursively) plans
+ * for its subpaths.
+ */
+static RecursiveUnion *
+create_recursiveunion_plan(PlannerInfo *root, RecursiveUnionPath *best_path)
+{
+ RecursiveUnion *plan;
+ Plan *leftplan;
+ Plan *rightplan;
+ List *tlist;
+ long numGroups;
+
+ /* Need both children to produce same tlist, so force it */
+ leftplan = create_plan_recurse(root, best_path->leftpath, CP_EXACT_TLIST);
+ rightplan = create_plan_recurse(root, best_path->rightpath, CP_EXACT_TLIST);
+
+ tlist = build_path_tlist(root, &best_path->path);
+
+ /* Convert numGroups to long int --- but 'ware overflow! */
+ numGroups = clamp_cardinality_to_long(best_path->numGroups);
+
+ plan = make_recursive_union(tlist,
+ leftplan,
+ rightplan,
+ best_path->wtParam,
+ best_path->distinctList,
+ numGroups);
+
+ copy_generic_path_info(&plan->plan, (Path *) best_path);
+
+ return plan;
+}
+
+/*
+ * create_lockrows_plan
+ *
+ * Create a LockRows plan for 'best_path' and (recursively) plans
+ * for its subpaths.
+ */
+static LockRows *
+create_lockrows_plan(PlannerInfo *root, LockRowsPath *best_path,
+ int flags)
+{
+ LockRows *plan;
+ Plan *subplan;
+
+ /* LockRows doesn't project, so tlist requirements pass through */
+ subplan = create_plan_recurse(root, best_path->subpath, flags);
+
+ plan = make_lockrows(subplan, best_path->rowMarks, best_path->epqParam);
+
+ copy_generic_path_info(&plan->plan, (Path *) best_path);
+
+ return plan;
+}
+
+/*
+ * create_modifytable_plan
+ * Create a ModifyTable plan for 'best_path'.
+ *
+ * Returns a Plan node.
+ */
+static ModifyTable *
+create_modifytable_plan(PlannerInfo *root, ModifyTablePath *best_path)
+{
+ ModifyTable *plan;
+ Path *subpath = best_path->subpath;
+ Plan *subplan;
+
+ /* Subplan must produce exactly the specified tlist */
+ subplan = create_plan_recurse(root, subpath, CP_EXACT_TLIST);
+
+ /* Transfer resname/resjunk labeling, too, to keep executor happy */
+ apply_tlist_labeling(subplan->targetlist, root->processed_tlist);
+
+ plan = make_modifytable(root,
+ subplan,
+ best_path->operation,
+ best_path->canSetTag,
+ best_path->nominalRelation,
+ best_path->rootRelation,
+ best_path->partColsUpdated,
+ best_path->resultRelations,
+ best_path->updateColnosLists,
+ best_path->withCheckOptionLists,
+ best_path->returningLists,
+ best_path->rowMarks,
+ best_path->onconflict,
+ best_path->mergeActionLists,
+ best_path->epqParam);
+
+ copy_generic_path_info(&plan->plan, &best_path->path);
+
+ return plan;
+}
+
+/*
+ * create_limit_plan
+ *
+ * Create a Limit plan for 'best_path' and (recursively) plans
+ * for its subpaths.
+ */
+static Limit *
+create_limit_plan(PlannerInfo *root, LimitPath *best_path, int flags)
+{
+ Limit *plan;
+ Plan *subplan;
+ int numUniqkeys = 0;
+ AttrNumber *uniqColIdx = NULL;
+ Oid *uniqOperators = NULL;
+ Oid *uniqCollations = NULL;
+
+ /* Limit doesn't project, so tlist requirements pass through */
+ subplan = create_plan_recurse(root, best_path->subpath, flags);
+
+ /* Extract information necessary for comparing rows for WITH TIES. */
+ if (best_path->limitOption == LIMIT_OPTION_WITH_TIES)
+ {
+ Query *parse = root->parse;
+ ListCell *l;
+
+ numUniqkeys = list_length(parse->sortClause);
+ uniqColIdx = (AttrNumber *) palloc(numUniqkeys * sizeof(AttrNumber));
+ uniqOperators = (Oid *) palloc(numUniqkeys * sizeof(Oid));
+ uniqCollations = (Oid *) palloc(numUniqkeys * sizeof(Oid));
+
+ numUniqkeys = 0;
+ foreach(l, parse->sortClause)
+ {
+ SortGroupClause *sortcl = (SortGroupClause *) lfirst(l);
+ TargetEntry *tle = get_sortgroupclause_tle(sortcl, parse->targetList);
+
+ uniqColIdx[numUniqkeys] = tle->resno;
+ uniqOperators[numUniqkeys] = sortcl->eqop;
+ uniqCollations[numUniqkeys] = exprCollation((Node *) tle->expr);
+ numUniqkeys++;
+ }
+ }
+
+ plan = make_limit(subplan,
+ best_path->limitOffset,
+ best_path->limitCount,
+ best_path->limitOption,
+ numUniqkeys, uniqColIdx, uniqOperators, uniqCollations);
+
+ copy_generic_path_info(&plan->plan, (Path *) best_path);
+
+ return plan;
+}
+
+
+/*****************************************************************************
+ *
+ * BASE-RELATION SCAN METHODS
+ *
+ *****************************************************************************/
+
+
+/*
+ * create_seqscan_plan
+ * Returns a seqscan plan for the base relation scanned by 'best_path'
+ * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
+ */
+static SeqScan *
+create_seqscan_plan(PlannerInfo *root, Path *best_path,
+ List *tlist, List *scan_clauses)
+{
+ SeqScan *scan_plan;
+ Index scan_relid = best_path->parent->relid;
+
+ /* it should be a base rel... */
+ Assert(scan_relid > 0);
+ Assert(best_path->parent->rtekind == RTE_RELATION);
+
+ /* Sort clauses into best execution order */
+ scan_clauses = order_qual_clauses(root, scan_clauses);
+
+ /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
+ scan_clauses = extract_actual_clauses(scan_clauses, false);
+
+ /* Replace any outer-relation variables with nestloop params */
+ if (best_path->param_info)
+ {
+ scan_clauses = (List *)
+ replace_nestloop_params(root, (Node *) scan_clauses);
+ }
+
+ scan_plan = make_seqscan(tlist,
+ scan_clauses,
+ scan_relid);
+
+ copy_generic_path_info(&scan_plan->scan.plan, best_path);
+
+ return scan_plan;
+}
+
+/*
+ * create_samplescan_plan
+ * Returns a samplescan plan for the base relation scanned by 'best_path'
+ * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
+ */
+static SampleScan *
+create_samplescan_plan(PlannerInfo *root, Path *best_path,
+ List *tlist, List *scan_clauses)
+{
+ SampleScan *scan_plan;
+ Index scan_relid = best_path->parent->relid;
+ RangeTblEntry *rte;
+ TableSampleClause *tsc;
+
+ /* it should be a base rel with a tablesample clause... */
+ Assert(scan_relid > 0);
+ rte = planner_rt_fetch(scan_relid, root);
+ Assert(rte->rtekind == RTE_RELATION);
+ tsc = rte->tablesample;
+ Assert(tsc != NULL);
+
+ /* Sort clauses into best execution order */
+ scan_clauses = order_qual_clauses(root, scan_clauses);
+
+ /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
+ scan_clauses = extract_actual_clauses(scan_clauses, false);
+
+ /* Replace any outer-relation variables with nestloop params */
+ if (best_path->param_info)
+ {
+ scan_clauses = (List *)
+ replace_nestloop_params(root, (Node *) scan_clauses);
+ tsc = (TableSampleClause *)
+ replace_nestloop_params(root, (Node *) tsc);
+ }
+
+ scan_plan = make_samplescan(tlist,
+ scan_clauses,
+ scan_relid,
+ tsc);
+
+ copy_generic_path_info(&scan_plan->scan.plan, best_path);
+
+ return scan_plan;
+}
+
+/*
+ * create_indexscan_plan
+ * Returns an indexscan plan for the base relation scanned by 'best_path'
+ * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
+ *
+ * We use this for both plain IndexScans and IndexOnlyScans, because the
+ * qual preprocessing work is the same for both. Note that the caller tells
+ * us which to build --- we don't look at best_path->path.pathtype, because
+ * create_bitmap_subplan needs to be able to override the prior decision.
+ */
+static Scan *
+create_indexscan_plan(PlannerInfo *root,
+ IndexPath *best_path,
+ List *tlist,
+ List *scan_clauses,
+ bool indexonly)
+{
+ Scan *scan_plan;
+ List *indexclauses = best_path->indexclauses;
+ List *indexorderbys = best_path->indexorderbys;
+ Index baserelid = best_path->path.parent->relid;
+ IndexOptInfo *indexinfo = best_path->indexinfo;
+ Oid indexoid = indexinfo->indexoid;
+ List *qpqual;
+ List *stripped_indexquals;
+ List *fixed_indexquals;
+ List *fixed_indexorderbys;
+ List *indexorderbyops = NIL;
+ ListCell *l;
+
+ /* it should be a base rel... */
+ Assert(baserelid > 0);
+ Assert(best_path->path.parent->rtekind == RTE_RELATION);
+
+ /*
+ * Extract the index qual expressions (stripped of RestrictInfos) from the
+ * IndexClauses list, and prepare a copy with index Vars substituted for
+ * table Vars. (This step also does replace_nestloop_params on the
+ * fixed_indexquals.)
+ */
+ fix_indexqual_references(root, best_path,
+ &stripped_indexquals,
+ &fixed_indexquals);
+
+ /*
+ * Likewise fix up index attr references in the ORDER BY expressions.
+ */
+ fixed_indexorderbys = fix_indexorderby_references(root, best_path);
+
+ /*
+ * The qpqual list must contain all restrictions not automatically handled
+ * by the index, other than pseudoconstant clauses which will be handled
+ * by a separate gating plan node. All the predicates in the indexquals
+ * will be checked (either by the index itself, or by nodeIndexscan.c),
+ * but if there are any "special" operators involved then they must be
+ * included in qpqual. The upshot is that qpqual must contain
+ * scan_clauses minus whatever appears in indexquals.
+ *
+ * is_redundant_with_indexclauses() detects cases where a scan clause is
+ * present in the indexclauses list or is generated from the same
+ * EquivalenceClass as some indexclause, and is therefore redundant with
+ * it, though not equal. (The latter happens when indxpath.c prefers a
+ * different derived equality than what generate_join_implied_equalities
+ * picked for a parameterized scan's ppi_clauses.) Note that it will not
+ * match to lossy index clauses, which is critical because we have to
+ * include the original clause in qpqual in that case.
+ *
+ * In some situations (particularly with OR'd index conditions) we may
+ * have scan_clauses that are not equal to, but are logically implied by,
+ * the index quals; so we also try a predicate_implied_by() check to see
+ * if we can discard quals that way. (predicate_implied_by assumes its
+ * first input contains only immutable functions, so we have to check
+ * that.)
+ *
+ * Note: if you change this bit of code you should also look at
+ * extract_nonindex_conditions() in costsize.c.
+ */
+ qpqual = NIL;
+ foreach(l, scan_clauses)
+ {
+ RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
+
+ if (rinfo->pseudoconstant)
+ continue; /* we may drop pseudoconstants here */
+ if (is_redundant_with_indexclauses(rinfo, indexclauses))
+ continue; /* dup or derived from same EquivalenceClass */
+ if (!contain_mutable_functions((Node *) rinfo->clause) &&
+ predicate_implied_by(list_make1(rinfo->clause), stripped_indexquals,
+ false))
+ continue; /* provably implied by indexquals */
+ qpqual = lappend(qpqual, rinfo);
+ }
+
+ /* Sort clauses into best execution order */
+ qpqual = order_qual_clauses(root, qpqual);
+
+ /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
+ qpqual = extract_actual_clauses(qpqual, false);
+
+ /*
+ * We have to replace any outer-relation variables with nestloop params in
+ * the indexqualorig, qpqual, and indexorderbyorig expressions. A bit
+ * annoying to have to do this separately from the processing in
+ * fix_indexqual_references --- rethink this when generalizing the inner
+ * indexscan support. But note we can't really do this earlier because
+ * it'd break the comparisons to predicates above ... (or would it? Those
+ * wouldn't have outer refs)
+ */
+ if (best_path->path.param_info)
+ {
+ stripped_indexquals = (List *)
+ replace_nestloop_params(root, (Node *) stripped_indexquals);
+ qpqual = (List *)
+ replace_nestloop_params(root, (Node *) qpqual);
+ indexorderbys = (List *)
+ replace_nestloop_params(root, (Node *) indexorderbys);
+ }
+
+ /*
+ * If there are ORDER BY expressions, look up the sort operators for their
+ * result datatypes.
+ */
+ if (indexorderbys)
+ {
+ ListCell *pathkeyCell,
+ *exprCell;
+
+ /*
+ * PathKey contains OID of the btree opfamily we're sorting by, but
+ * that's not quite enough because we need the expression's datatype
+ * to look up the sort operator in the operator family.
+ */
+ Assert(list_length(best_path->path.pathkeys) == list_length(indexorderbys));
+ forboth(pathkeyCell, best_path->path.pathkeys, exprCell, indexorderbys)
+ {
+ PathKey *pathkey = (PathKey *) lfirst(pathkeyCell);
+ Node *expr = (Node *) lfirst(exprCell);
+ Oid exprtype = exprType(expr);
+ Oid sortop;
+
+ /* Get sort operator from opfamily */
+ sortop = get_opfamily_member(pathkey->pk_opfamily,
+ exprtype,
+ exprtype,
+ pathkey->pk_strategy);
+ if (!OidIsValid(sortop))
+ elog(ERROR, "missing operator %d(%u,%u) in opfamily %u",
+ pathkey->pk_strategy, exprtype, exprtype, pathkey->pk_opfamily);
+ indexorderbyops = lappend_oid(indexorderbyops, sortop);
+ }
+ }
+
+ /*
+ * For an index-only scan, we must mark indextlist entries as resjunk if
+ * they are columns that the index AM can't return; this cues setrefs.c to
+ * not generate references to those columns.
+ */
+ if (indexonly)
+ {
+ int i = 0;
+
+ foreach(l, indexinfo->indextlist)
+ {
+ TargetEntry *indextle = (TargetEntry *) lfirst(l);
+
+ indextle->resjunk = !indexinfo->canreturn[i];
+ i++;
+ }
+ }
+
+ /* Finally ready to build the plan node */
+ if (indexonly)
+ scan_plan = (Scan *) make_indexonlyscan(tlist,
+ qpqual,
+ baserelid,
+ indexoid,
+ fixed_indexquals,
+ stripped_indexquals,
+ fixed_indexorderbys,
+ indexinfo->indextlist,
+ best_path->indexscandir);
+ else
+ scan_plan = (Scan *) make_indexscan(tlist,
+ qpqual,
+ baserelid,
+ indexoid,
+ fixed_indexquals,
+ stripped_indexquals,
+ fixed_indexorderbys,
+ indexorderbys,
+ indexorderbyops,
+ best_path->indexscandir);
+
+ copy_generic_path_info(&scan_plan->plan, &best_path->path);
+
+ return scan_plan;
+}
+
+/*
+ * create_bitmap_scan_plan
+ * Returns a bitmap scan plan for the base relation scanned by 'best_path'
+ * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
+ */
+static BitmapHeapScan *
+create_bitmap_scan_plan(PlannerInfo *root,
+ BitmapHeapPath *best_path,
+ List *tlist,
+ List *scan_clauses)
+{
+ Index baserelid = best_path->path.parent->relid;
+ Plan *bitmapqualplan;
+ List *bitmapqualorig;
+ List *indexquals;
+ List *indexECs;
+ List *qpqual;
+ ListCell *l;
+ BitmapHeapScan *scan_plan;
+
+ /* it should be a base rel... */
+ Assert(baserelid > 0);
+ Assert(best_path->path.parent->rtekind == RTE_RELATION);
+
+ /* Process the bitmapqual tree into a Plan tree and qual lists */
+ bitmapqualplan = create_bitmap_subplan(root, best_path->bitmapqual,
+ &bitmapqualorig, &indexquals,
+ &indexECs);
+
+ if (best_path->path.parallel_aware)
+ bitmap_subplan_mark_shared(bitmapqualplan);
+
+ /*
+ * The qpqual list must contain all restrictions not automatically handled
+ * by the index, other than pseudoconstant clauses which will be handled
+ * by a separate gating plan node. All the predicates in the indexquals
+ * will be checked (either by the index itself, or by
+ * nodeBitmapHeapscan.c), but if there are any "special" operators
+ * involved then they must be added to qpqual. The upshot is that qpqual
+ * must contain scan_clauses minus whatever appears in indexquals.
+ *
+ * This loop is similar to the comparable code in create_indexscan_plan(),
+ * but with some differences because it has to compare the scan clauses to
+ * stripped (no RestrictInfos) indexquals. See comments there for more
+ * info.
+ *
+ * In normal cases simple equal() checks will be enough to spot duplicate
+ * clauses, so we try that first. We next see if the scan clause is
+ * redundant with any top-level indexqual by virtue of being generated
+ * from the same EC. After that, try predicate_implied_by().
+ *
+ * Unlike create_indexscan_plan(), the predicate_implied_by() test here is
+ * useful for getting rid of qpquals that are implied by index predicates,
+ * because the predicate conditions are included in the "indexquals"
+ * returned by create_bitmap_subplan(). Bitmap scans have to do it that
+ * way because predicate conditions need to be rechecked if the scan
+ * becomes lossy, so they have to be included in bitmapqualorig.
+ */
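+	/*
+	 * For instance, with a hypothetical partial index on t(a) WHERE b > 0
+	 * and a query "WHERE a = 1 AND b > 0", create_bitmap_subplan() reports
+	 * "b > 0" among the indexquals (it is part of the index predicate), so
+	 * the checks below can drop the redundant "b > 0" scan clause from
+	 * qpqual.
+	 */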
+ qpqual = NIL;
+ foreach(l, scan_clauses)
+ {
+ RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
+ Node *clause = (Node *) rinfo->clause;
+
+ if (rinfo->pseudoconstant)
+ continue; /* we may drop pseudoconstants here */
+ if (list_member(indexquals, clause))
+ continue; /* simple duplicate */
+ if (rinfo->parent_ec && list_member_ptr(indexECs, rinfo->parent_ec))
+ continue; /* derived from same EquivalenceClass */
+ if (!contain_mutable_functions(clause) &&
+ predicate_implied_by(list_make1(clause), indexquals, false))
+ continue; /* provably implied by indexquals */
+ qpqual = lappend(qpqual, rinfo);
+ }
+
+ /* Sort clauses into best execution order */
+ qpqual = order_qual_clauses(root, qpqual);
+
+ /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
+ qpqual = extract_actual_clauses(qpqual, false);
+
+ /*
+ * When dealing with special operators, we will at this point have
+ * duplicate clauses in qpqual and bitmapqualorig. We may as well drop
+ * 'em from bitmapqualorig, since there's no point in making the tests
+ * twice.
+ */
+ bitmapqualorig = list_difference_ptr(bitmapqualorig, qpqual);
+
+ /*
+ * We have to replace any outer-relation variables with nestloop params in
+ * the qpqual and bitmapqualorig expressions. (This was already done for
+ * expressions attached to plan nodes in the bitmapqualplan tree.)
+ */
+ if (best_path->path.param_info)
+ {
+ qpqual = (List *)
+ replace_nestloop_params(root, (Node *) qpqual);
+ bitmapqualorig = (List *)
+ replace_nestloop_params(root, (Node *) bitmapqualorig);
+ }
+
+ /* Finally ready to build the plan node */
+ scan_plan = make_bitmap_heapscan(tlist,
+ qpqual,
+ bitmapqualplan,
+ bitmapqualorig,
+ baserelid);
+
+ copy_generic_path_info(&scan_plan->scan.plan, &best_path->path);
+
+ return scan_plan;
+}
+
+/*
+ * Given a bitmapqual tree, generate the Plan tree that implements it
+ *
+ * As byproducts, we also return in *qual and *indexqual the qual lists
+ * (in implicit-AND form, without RestrictInfos) describing the original index
+ * conditions and the generated indexqual conditions. (These are the same in
+ * simple cases, but when special index operators are involved, the former
+ * list includes the special conditions while the latter includes the actual
+ * indexable conditions derived from them.) Both lists include partial-index
+ * predicates, because we have to recheck predicates as well as index
+ * conditions if the bitmap scan becomes lossy.
+ *
+ * In addition, we return a list of EquivalenceClass pointers for all the
+ * top-level indexquals that were possibly-redundantly derived from ECs.
+ * This allows removal of scan_clauses that are redundant with such quals.
+ * (We do not attempt to detect such redundancies for quals that are within
+ * OR subtrees. This could be done in a less hacky way if we returned the
+ * indexquals in RestrictInfo form, but that would be slower and still pretty
+ * messy, since we'd have to build new RestrictInfos in many cases.)
+ */
+static Plan *
+create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual,
+ List **qual, List **indexqual, List **indexECs)
+{
+ Plan *plan;
+
+ if (IsA(bitmapqual, BitmapAndPath))
+ {
+ BitmapAndPath *apath = (BitmapAndPath *) bitmapqual;
+ List *subplans = NIL;
+ List *subquals = NIL;
+ List *subindexquals = NIL;
+ List *subindexECs = NIL;
+ ListCell *l;
+
+ /*
+ * There may well be redundant quals among the subplans, since a
+ * top-level WHERE qual might have gotten used to form several
+ * different index quals. We don't try exceedingly hard to eliminate
+ * redundancies, but we do eliminate obvious duplicates by using
+ * list_concat_unique.
+ */
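+		/*
+		 * For example, if the same top-level clause "a = 42" (hypothetical)
+		 * is usable by two of the ANDed indexes, both subplans report it in
+		 * their subqual lists, and list_concat_unique keeps a single copy.
+		 */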
+ foreach(l, apath->bitmapquals)
+ {
+ Plan *subplan;
+ List *subqual;
+ List *subindexqual;
+ List *subindexEC;
+
+ subplan = create_bitmap_subplan(root, (Path *) lfirst(l),
+ &subqual, &subindexqual,
+ &subindexEC);
+ subplans = lappend(subplans, subplan);
+ subquals = list_concat_unique(subquals, subqual);
+ subindexquals = list_concat_unique(subindexquals, subindexqual);
+ /* Duplicates in indexECs aren't worth getting rid of */
+ subindexECs = list_concat(subindexECs, subindexEC);
+ }
+ plan = (Plan *) make_bitmap_and(subplans);
+ plan->startup_cost = apath->path.startup_cost;
+ plan->total_cost = apath->path.total_cost;
+ plan->plan_rows =
+ clamp_row_est(apath->bitmapselectivity * apath->path.parent->tuples);
+ plan->plan_width = 0; /* meaningless */
+ plan->parallel_aware = false;
+ plan->parallel_safe = apath->path.parallel_safe;
+ *qual = subquals;
+ *indexqual = subindexquals;
+ *indexECs = subindexECs;
+ }
+ else if (IsA(bitmapqual, BitmapOrPath))
+ {
+ BitmapOrPath *opath = (BitmapOrPath *) bitmapqual;
+ List *subplans = NIL;
+ List *subquals = NIL;
+ List *subindexquals = NIL;
+ bool const_true_subqual = false;
+ bool const_true_subindexqual = false;
+ ListCell *l;
+
+ /*
+ * Here, we only detect qual-free subplans. A qual-free subplan would
+ * cause us to generate "... OR true ..." which we may as well reduce
+ * to just "true". We do not try to eliminate redundant subclauses
+ * because (a) it's not as likely as in the AND case, and (b) we might
+ * well be working with hundreds or even thousands of OR conditions,
+ * perhaps from a long IN list. The performance of list_append_unique
+ * would be unacceptable.
+ */
+ foreach(l, opath->bitmapquals)
+ {
+ Plan *subplan;
+ List *subqual;
+ List *subindexqual;
+ List *subindexEC;
+
+ subplan = create_bitmap_subplan(root, (Path *) lfirst(l),
+ &subqual, &subindexqual,
+ &subindexEC);
+ subplans = lappend(subplans, subplan);
+ if (subqual == NIL)
+ const_true_subqual = true;
+ else if (!const_true_subqual)
+ subquals = lappend(subquals,
+ make_ands_explicit(subqual));
+ if (subindexqual == NIL)
+ const_true_subindexqual = true;
+ else if (!const_true_subindexqual)
+ subindexquals = lappend(subindexquals,
+ make_ands_explicit(subindexqual));
+ }
+
+ /*
+ * In the presence of ScalarArrayOpExpr quals, we might have built
+ * BitmapOrPaths with just one subpath; don't add an OR step.
+ */
+ if (list_length(subplans) == 1)
+ {
+ plan = (Plan *) linitial(subplans);
+ }
+ else
+ {
+ plan = (Plan *) make_bitmap_or(subplans);
+ plan->startup_cost = opath->path.startup_cost;
+ plan->total_cost = opath->path.total_cost;
+ plan->plan_rows =
+ clamp_row_est(opath->bitmapselectivity * opath->path.parent->tuples);
+ plan->plan_width = 0; /* meaningless */
+ plan->parallel_aware = false;
+ plan->parallel_safe = opath->path.parallel_safe;
+ }
+
+ /*
+ * If there were constant-TRUE subquals, the OR reduces to constant
+ * TRUE. Also, avoid generating one-element ORs, which could happen
+ * due to redundancy elimination or ScalarArrayOpExpr quals.
+ */
+ if (const_true_subqual)
+ *qual = NIL;
+ else if (list_length(subquals) <= 1)
+ *qual = subquals;
+ else
+ *qual = list_make1(make_orclause(subquals));
+ if (const_true_subindexqual)
+ *indexqual = NIL;
+ else if (list_length(subindexquals) <= 1)
+ *indexqual = subindexquals;
+ else
+ *indexqual = list_make1(make_orclause(subindexquals));
+ *indexECs = NIL;
+ }
+ else if (IsA(bitmapqual, IndexPath))
+ {
+ IndexPath *ipath = (IndexPath *) bitmapqual;
+ IndexScan *iscan;
+ List *subquals;
+ List *subindexquals;
+ List *subindexECs;
+ ListCell *l;
+
+ /* Use the regular indexscan plan build machinery... */
+ iscan = castNode(IndexScan,
+ create_indexscan_plan(root, ipath,
+ NIL, NIL, false));
+ /* then convert to a bitmap indexscan */
+ plan = (Plan *) make_bitmap_indexscan(iscan->scan.scanrelid,
+ iscan->indexid,
+ iscan->indexqual,
+ iscan->indexqualorig);
+ /* and set its cost/width fields appropriately */
+ plan->startup_cost = 0.0;
+ plan->total_cost = ipath->indextotalcost;
+ plan->plan_rows =
+ clamp_row_est(ipath->indexselectivity * ipath->path.parent->tuples);
+ plan->plan_width = 0; /* meaningless */
+ plan->parallel_aware = false;
+ plan->parallel_safe = ipath->path.parallel_safe;
+ /* Extract original index clauses, actual index quals, relevant ECs */
+ subquals = NIL;
+ subindexquals = NIL;
+ subindexECs = NIL;
+ foreach(l, ipath->indexclauses)
+ {
+ IndexClause *iclause = (IndexClause *) lfirst(l);
+ RestrictInfo *rinfo = iclause->rinfo;
+
+ Assert(!rinfo->pseudoconstant);
+ subquals = lappend(subquals, rinfo->clause);
+ subindexquals = list_concat(subindexquals,
+ get_actual_clauses(iclause->indexquals));
+ if (rinfo->parent_ec)
+ subindexECs = lappend(subindexECs, rinfo->parent_ec);
+ }
+ /* We can add any index predicate conditions, too */
+ foreach(l, ipath->indexinfo->indpred)
+ {
+ Expr *pred = (Expr *) lfirst(l);
+
+ /*
+ * We know that the index predicate must have been implied by the
+ * query condition as a whole, but it may or may not be implied by
+ * the conditions that got pushed into the bitmapqual. Avoid
+ * generating redundant conditions.
+ */
+ if (!predicate_implied_by(list_make1(pred), subquals, false))
+ {
+ subquals = lappend(subquals, pred);
+ subindexquals = lappend(subindexquals, pred);
+ }
+ }
+ *qual = subquals;
+ *indexqual = subindexquals;
+ *indexECs = subindexECs;
+ }
+ else
+ {
+ elog(ERROR, "unrecognized node type: %d", nodeTag(bitmapqual));
+ plan = NULL; /* keep compiler quiet */
+ }
+
+ return plan;
+}
+
+/*
+ * create_tidscan_plan
+ * Returns a tidscan plan for the base relation scanned by 'best_path'
+ * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
+ */
+static TidScan *
+create_tidscan_plan(PlannerInfo *root, TidPath *best_path,
+ List *tlist, List *scan_clauses)
+{
+ TidScan *scan_plan;
+ Index scan_relid = best_path->path.parent->relid;
+ List *tidquals = best_path->tidquals;
+
+ /* it should be a base rel... */
+ Assert(scan_relid > 0);
+ Assert(best_path->path.parent->rtekind == RTE_RELATION);
+
+ /*
+ * The qpqual list must contain all restrictions not enforced by the
+ * tidquals list. Since tidquals has OR semantics, we have to be careful
+ * about matching it up to scan_clauses. It's convenient to handle the
+ * single-tidqual case separately from the multiple-tidqual case. In the
+ * single-tidqual case, we look through the scan_clauses while they are
+ * still in RestrictInfo form, and drop any that are redundant with the
+ * tidqual.
+ *
+ * In normal cases simple pointer equality checks will be enough to spot
+ * duplicate RestrictInfos, so we try that first.
+ *
+ * Another common case is that a scan_clauses entry is generated from the
+ * same EquivalenceClass as some tidqual, and is therefore redundant with
+ * it, though not equal.
+ *
+ * Unlike indexpaths, we don't bother with predicate_implied_by(); the
+	 * number of cases where it could win is pretty small.
+ */
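+	/*
+	 * For instance (hypothetical query), "WHERE ctid = '(0,1)' AND a = 1"
+	 * produces a single tidqual for the ctid test; the loop below drops
+	 * that RestrictInfo from scan_clauses and keeps "a = 1" as qpqual.
+	 */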
+ if (list_length(tidquals) == 1)
+ {
+ List *qpqual = NIL;
+ ListCell *l;
+
+ foreach(l, scan_clauses)
+ {
+ RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
+
+ if (rinfo->pseudoconstant)
+ continue; /* we may drop pseudoconstants here */
+ if (list_member_ptr(tidquals, rinfo))
+ continue; /* simple duplicate */
+ if (is_redundant_derived_clause(rinfo, tidquals))
+ continue; /* derived from same EquivalenceClass */
+ qpqual = lappend(qpqual, rinfo);
+ }
+ scan_clauses = qpqual;
+ }
+
+ /* Sort clauses into best execution order */
+ scan_clauses = order_qual_clauses(root, scan_clauses);
+
+ /* Reduce RestrictInfo lists to bare expressions; ignore pseudoconstants */
+ tidquals = extract_actual_clauses(tidquals, false);
+ scan_clauses = extract_actual_clauses(scan_clauses, false);
+
+ /*
+ * If we have multiple tidquals, it's more convenient to remove duplicate
+ * scan_clauses after stripping the RestrictInfos. In this situation,
+ * because the tidquals represent OR sub-clauses, they could not have come
+ * from EquivalenceClasses so we don't have to worry about matching up
+ * non-identical clauses. On the other hand, because tidpath.c will have
+ * extracted those sub-clauses from some OR clause and built its own list,
+ * we will certainly not have pointer equality to any scan clause. So
+ * convert the tidquals list to an explicit OR clause and see if we can
+ * match it via equal() to any scan clause.
+ */
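+	/*
+	 * For instance (hypothetical query), "WHERE ctid = '(0,1)' OR ctid =
+	 * '(0,2)'" yields two tidquals; rebuilding them as an explicit OR lets
+	 * the equal() comparison inside list_difference() strip the original
+	 * OR clause out of scan_clauses.
+	 */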
+ if (list_length(tidquals) > 1)
+ scan_clauses = list_difference(scan_clauses,
+ list_make1(make_orclause(tidquals)));
+
+ /* Replace any outer-relation variables with nestloop params */
+ if (best_path->path.param_info)
+ {
+ tidquals = (List *)
+ replace_nestloop_params(root, (Node *) tidquals);
+ scan_clauses = (List *)
+ replace_nestloop_params(root, (Node *) scan_clauses);
+ }
+
+ scan_plan = make_tidscan(tlist,
+ scan_clauses,
+ scan_relid,
+ tidquals);
+
+ copy_generic_path_info(&scan_plan->scan.plan, &best_path->path);
+
+ return scan_plan;
+}
+
+/*
+ * create_tidrangescan_plan
+ * Returns a tidrangescan plan for the base relation scanned by 'best_path'
+ * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
+ */
+static TidRangeScan *
+create_tidrangescan_plan(PlannerInfo *root, TidRangePath *best_path,
+ List *tlist, List *scan_clauses)
+{
+ TidRangeScan *scan_plan;
+ Index scan_relid = best_path->path.parent->relid;
+ List *tidrangequals = best_path->tidrangequals;
+
+ /* it should be a base rel... */
+ Assert(scan_relid > 0);
+ Assert(best_path->path.parent->rtekind == RTE_RELATION);
+
+ /*
+ * The qpqual list must contain all restrictions not enforced by the
+ * tidrangequals list. tidrangequals has AND semantics, so we can simply
+ * remove any qual that appears in it.
+ */
+ {
+ List *qpqual = NIL;
+ ListCell *l;
+
+ foreach(l, scan_clauses)
+ {
+ RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
+
+ if (rinfo->pseudoconstant)
+ continue; /* we may drop pseudoconstants here */
+ if (list_member_ptr(tidrangequals, rinfo))
+ continue; /* simple duplicate */
+ qpqual = lappend(qpqual, rinfo);
+ }
+ scan_clauses = qpqual;
+ }
+
+ /* Sort clauses into best execution order */
+ scan_clauses = order_qual_clauses(root, scan_clauses);
+
+ /* Reduce RestrictInfo lists to bare expressions; ignore pseudoconstants */
+ tidrangequals = extract_actual_clauses(tidrangequals, false);
+ scan_clauses = extract_actual_clauses(scan_clauses, false);
+
+ /* Replace any outer-relation variables with nestloop params */
+ if (best_path->path.param_info)
+ {
+ tidrangequals = (List *)
+ replace_nestloop_params(root, (Node *) tidrangequals);
+ scan_clauses = (List *)
+ replace_nestloop_params(root, (Node *) scan_clauses);
+ }
+
+ scan_plan = make_tidrangescan(tlist,
+ scan_clauses,
+ scan_relid,
+ tidrangequals);
+
+ copy_generic_path_info(&scan_plan->scan.plan, &best_path->path);
+
+ return scan_plan;
+}
+
+/*
+ * create_subqueryscan_plan
+ * Returns a subqueryscan plan for the base relation scanned by 'best_path'
+ * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
+ */
+static SubqueryScan *
+create_subqueryscan_plan(PlannerInfo *root, SubqueryScanPath *best_path,
+ List *tlist, List *scan_clauses)
+{
+ SubqueryScan *scan_plan;
+ RelOptInfo *rel = best_path->path.parent;
+ Index scan_relid = rel->relid;
+ Plan *subplan;
+
+ /* it should be a subquery base rel... */
+ Assert(scan_relid > 0);
+ Assert(rel->rtekind == RTE_SUBQUERY);
+
+ /*
+ * Recursively create Plan from Path for subquery. Since we are entering
+ * a different planner context (subroot), recurse to create_plan not
+ * create_plan_recurse.
+ */
+ subplan = create_plan(rel->subroot, best_path->subpath);
+
+ /* Sort clauses into best execution order */
+ scan_clauses = order_qual_clauses(root, scan_clauses);
+
+ /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
+ scan_clauses = extract_actual_clauses(scan_clauses, false);
+
+ /* Replace any outer-relation variables with nestloop params */
+ if (best_path->path.param_info)
+ {
+ scan_clauses = (List *)
+ replace_nestloop_params(root, (Node *) scan_clauses);
+ process_subquery_nestloop_params(root,
+ rel->subplan_params);
+ }
+
+ scan_plan = make_subqueryscan(tlist,
+ scan_clauses,
+ scan_relid,
+ subplan);
+
+ copy_generic_path_info(&scan_plan->scan.plan, &best_path->path);
+
+ return scan_plan;
+}
+
+/*
+ * create_functionscan_plan
+ * Returns a functionscan plan for the base relation scanned by 'best_path'
+ * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
+ */
+static FunctionScan *
+create_functionscan_plan(PlannerInfo *root, Path *best_path,
+ List *tlist, List *scan_clauses)
+{
+ FunctionScan *scan_plan;
+ Index scan_relid = best_path->parent->relid;
+ RangeTblEntry *rte;
+ List *functions;
+
+ /* it should be a function base rel... */
+ Assert(scan_relid > 0);
+ rte = planner_rt_fetch(scan_relid, root);
+ Assert(rte->rtekind == RTE_FUNCTION);
+ functions = rte->functions;
+
+ /* Sort clauses into best execution order */
+ scan_clauses = order_qual_clauses(root, scan_clauses);
+
+ /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
+ scan_clauses = extract_actual_clauses(scan_clauses, false);
+
+ /* Replace any outer-relation variables with nestloop params */
+ if (best_path->param_info)
+ {
+ scan_clauses = (List *)
+ replace_nestloop_params(root, (Node *) scan_clauses);
+ /* The function expressions could contain nestloop params, too */
+ functions = (List *) replace_nestloop_params(root, (Node *) functions);
+ }
+
+ scan_plan = make_functionscan(tlist, scan_clauses, scan_relid,
+ functions, rte->funcordinality);
+
+ copy_generic_path_info(&scan_plan->scan.plan, best_path);
+
+ return scan_plan;
+}
+
+/*
+ * create_tablefuncscan_plan
+ * Returns a tablefuncscan plan for the base relation scanned by 'best_path'
+ * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
+ */
+static TableFuncScan *
+create_tablefuncscan_plan(PlannerInfo *root, Path *best_path,
+ List *tlist, List *scan_clauses)
+{
+ TableFuncScan *scan_plan;
+ Index scan_relid = best_path->parent->relid;
+ RangeTblEntry *rte;
+ TableFunc *tablefunc;
+
+ /* it should be a function base rel... */
+ Assert(scan_relid > 0);
+ rte = planner_rt_fetch(scan_relid, root);
+ Assert(rte->rtekind == RTE_TABLEFUNC);
+ tablefunc = rte->tablefunc;
+
+ /* Sort clauses into best execution order */
+ scan_clauses = order_qual_clauses(root, scan_clauses);
+
+ /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
+ scan_clauses = extract_actual_clauses(scan_clauses, false);
+
+ /* Replace any outer-relation variables with nestloop params */
+ if (best_path->param_info)
+ {
+ scan_clauses = (List *)
+ replace_nestloop_params(root, (Node *) scan_clauses);
+ /* The function expressions could contain nestloop params, too */
+ tablefunc = (TableFunc *) replace_nestloop_params(root, (Node *) tablefunc);
+ }
+
+ scan_plan = make_tablefuncscan(tlist, scan_clauses, scan_relid,
+ tablefunc);
+
+ copy_generic_path_info(&scan_plan->scan.plan, best_path);
+
+ return scan_plan;
+}
+
+/*
+ * create_valuesscan_plan
+ * Returns a valuesscan plan for the base relation scanned by 'best_path'
+ * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
+ */
+static ValuesScan *
+create_valuesscan_plan(PlannerInfo *root, Path *best_path,
+ List *tlist, List *scan_clauses)
+{
+ ValuesScan *scan_plan;
+ Index scan_relid = best_path->parent->relid;
+ RangeTblEntry *rte;
+ List *values_lists;
+
+ /* it should be a values base rel... */
+ Assert(scan_relid > 0);
+ rte = planner_rt_fetch(scan_relid, root);
+ Assert(rte->rtekind == RTE_VALUES);
+ values_lists = rte->values_lists;
+
+ /* Sort clauses into best execution order */
+ scan_clauses = order_qual_clauses(root, scan_clauses);
+
+ /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
+ scan_clauses = extract_actual_clauses(scan_clauses, false);
+
+ /* Replace any outer-relation variables with nestloop params */
+ if (best_path->param_info)
+ {
+ scan_clauses = (List *)
+ replace_nestloop_params(root, (Node *) scan_clauses);
+ /* The values lists could contain nestloop params, too */
+ values_lists = (List *)
+ replace_nestloop_params(root, (Node *) values_lists);
+ }
+
+ scan_plan = make_valuesscan(tlist, scan_clauses, scan_relid,
+ values_lists);
+
+ copy_generic_path_info(&scan_plan->scan.plan, best_path);
+
+ return scan_plan;
+}
+
+/*
+ * create_ctescan_plan
+ * Returns a ctescan plan for the base relation scanned by 'best_path'
+ * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
+ */
+static CteScan *
+create_ctescan_plan(PlannerInfo *root, Path *best_path,
+ List *tlist, List *scan_clauses)
+{
+ CteScan *scan_plan;
+ Index scan_relid = best_path->parent->relid;
+ RangeTblEntry *rte;
+ SubPlan *ctesplan = NULL;
+ int plan_id;
+ int cte_param_id;
+ PlannerInfo *cteroot;
+ Index levelsup;
+ int ndx;
+ ListCell *lc;
+
+ Assert(scan_relid > 0);
+ rte = planner_rt_fetch(scan_relid, root);
+ Assert(rte->rtekind == RTE_CTE);
+ Assert(!rte->self_reference);
+
+ /*
+ * Find the referenced CTE, and locate the SubPlan previously made for it.
+ */
+ levelsup = rte->ctelevelsup;
+ cteroot = root;
+ while (levelsup-- > 0)
+ {
+ cteroot = cteroot->parent_root;
+ if (!cteroot) /* shouldn't happen */
+ elog(ERROR, "bad levelsup for CTE \"%s\"", rte->ctename);
+ }
+
+ /*
+ * Note: cte_plan_ids can be shorter than cteList, if we are still working
+ * on planning the CTEs (ie, this is a side-reference from another CTE).
+ * So we mustn't use forboth here.
+ */
+ ndx = 0;
+ foreach(lc, cteroot->parse->cteList)
+ {
+ CommonTableExpr *cte = (CommonTableExpr *) lfirst(lc);
+
+ if (strcmp(cte->ctename, rte->ctename) == 0)
+ break;
+ ndx++;
+ }
+ if (lc == NULL) /* shouldn't happen */
+ elog(ERROR, "could not find CTE \"%s\"", rte->ctename);
+ if (ndx >= list_length(cteroot->cte_plan_ids))
+ elog(ERROR, "could not find plan for CTE \"%s\"", rte->ctename);
+ plan_id = list_nth_int(cteroot->cte_plan_ids, ndx);
+ if (plan_id <= 0)
+ elog(ERROR, "no plan was made for CTE \"%s\"", rte->ctename);
+ foreach(lc, cteroot->init_plans)
+ {
+ ctesplan = (SubPlan *) lfirst(lc);
+ if (ctesplan->plan_id == plan_id)
+ break;
+ }
+ if (lc == NULL) /* shouldn't happen */
+ elog(ERROR, "could not find plan for CTE \"%s\"", rte->ctename);
+
+ /*
+ * We need the CTE param ID, which is the sole member of the SubPlan's
+ * setParam list.
+ */
+ cte_param_id = linitial_int(ctesplan->setParam);
+
+ /* Sort clauses into best execution order */
+ scan_clauses = order_qual_clauses(root, scan_clauses);
+
+ /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
+ scan_clauses = extract_actual_clauses(scan_clauses, false);
+
+ /* Replace any outer-relation variables with nestloop params */
+ if (best_path->param_info)
+ {
+ scan_clauses = (List *)
+ replace_nestloop_params(root, (Node *) scan_clauses);
+ }
+
+ scan_plan = make_ctescan(tlist, scan_clauses, scan_relid,
+ plan_id, cte_param_id);
+
+ copy_generic_path_info(&scan_plan->scan.plan, best_path);
+
+ return scan_plan;
+}
+
+/*
+ * create_namedtuplestorescan_plan
+ * Returns a tuplestorescan plan for the base relation scanned by
+ * 'best_path' with restriction clauses 'scan_clauses' and targetlist
+ * 'tlist'.
+ */
+static NamedTuplestoreScan *
+create_namedtuplestorescan_plan(PlannerInfo *root, Path *best_path,
+ List *tlist, List *scan_clauses)
+{
+ NamedTuplestoreScan *scan_plan;
+ Index scan_relid = best_path->parent->relid;
+ RangeTblEntry *rte;
+
+ Assert(scan_relid > 0);
+ rte = planner_rt_fetch(scan_relid, root);
+ Assert(rte->rtekind == RTE_NAMEDTUPLESTORE);
+
+ /* Sort clauses into best execution order */
+ scan_clauses = order_qual_clauses(root, scan_clauses);
+
+ /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
+ scan_clauses = extract_actual_clauses(scan_clauses, false);
+
+ /* Replace any outer-relation variables with nestloop params */
+ if (best_path->param_info)
+ {
+ scan_clauses = (List *)
+ replace_nestloop_params(root, (Node *) scan_clauses);
+ }
+
+ scan_plan = make_namedtuplestorescan(tlist, scan_clauses, scan_relid,
+ rte->enrname);
+
+ copy_generic_path_info(&scan_plan->scan.plan, best_path);
+
+ return scan_plan;
+}
+
+/*
+ * create_resultscan_plan
+ * Returns a Result plan for the RTE_RESULT base relation scanned by
+ * 'best_path' with restriction clauses 'scan_clauses' and targetlist
+ * 'tlist'.
+ */
+static Result *
+create_resultscan_plan(PlannerInfo *root, Path *best_path,
+ List *tlist, List *scan_clauses)
+{
+ Result *scan_plan;
+ Index scan_relid = best_path->parent->relid;
+ RangeTblEntry *rte PG_USED_FOR_ASSERTS_ONLY;
+
+ Assert(scan_relid > 0);
+ rte = planner_rt_fetch(scan_relid, root);
+ Assert(rte->rtekind == RTE_RESULT);
+
+ /* Sort clauses into best execution order */
+ scan_clauses = order_qual_clauses(root, scan_clauses);
+
+ /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
+ scan_clauses = extract_actual_clauses(scan_clauses, false);
+
+ /* Replace any outer-relation variables with nestloop params */
+ if (best_path->param_info)
+ {
+ scan_clauses = (List *)
+ replace_nestloop_params(root, (Node *) scan_clauses);
+ }
+
+ scan_plan = make_result(tlist, (Node *) scan_clauses, NULL);
+
+ copy_generic_path_info(&scan_plan->plan, best_path);
+
+ return scan_plan;
+}
+
+/*
+ * create_worktablescan_plan
+ * Returns a worktablescan plan for the base relation scanned by 'best_path'
+ * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
+ */
+static WorkTableScan *
+create_worktablescan_plan(PlannerInfo *root, Path *best_path,
+ List *tlist, List *scan_clauses)
+{
+ WorkTableScan *scan_plan;
+ Index scan_relid = best_path->parent->relid;
+ RangeTblEntry *rte;
+ Index levelsup;
+ PlannerInfo *cteroot;
+
+ Assert(scan_relid > 0);
+ rte = planner_rt_fetch(scan_relid, root);
+ Assert(rte->rtekind == RTE_CTE);
+ Assert(rte->self_reference);
+
+ /*
+ * We need to find the worktable param ID, which is in the plan level
+ * that's processing the recursive UNION, which is one level *below* where
+ * the CTE comes from.
+ */
+ levelsup = rte->ctelevelsup;
+ if (levelsup == 0) /* shouldn't happen */
+ elog(ERROR, "bad levelsup for CTE \"%s\"", rte->ctename);
+ levelsup--;
+ cteroot = root;
+ while (levelsup-- > 0)
+ {
+ cteroot = cteroot->parent_root;
+ if (!cteroot) /* shouldn't happen */
+ elog(ERROR, "bad levelsup for CTE \"%s\"", rte->ctename);
+ }
+ if (cteroot->wt_param_id < 0) /* shouldn't happen */
+ elog(ERROR, "could not find param ID for CTE \"%s\"", rte->ctename);
+
+ /* Sort clauses into best execution order */
+ scan_clauses = order_qual_clauses(root, scan_clauses);
+
+ /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
+ scan_clauses = extract_actual_clauses(scan_clauses, false);
+
+ /* Replace any outer-relation variables with nestloop params */
+ if (best_path->param_info)
+ {
+ scan_clauses = (List *)
+ replace_nestloop_params(root, (Node *) scan_clauses);
+ }
+
+ scan_plan = make_worktablescan(tlist, scan_clauses, scan_relid,
+ cteroot->wt_param_id);
+
+ copy_generic_path_info(&scan_plan->scan.plan, best_path);
+
+ return scan_plan;
+}
+
+/*
+ * create_foreignscan_plan
+ * Returns a foreignscan plan for the relation scanned by 'best_path'
+ * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
+ */
+static ForeignScan *
+create_foreignscan_plan(PlannerInfo *root, ForeignPath *best_path,
+ List *tlist, List *scan_clauses)
+{
+ ForeignScan *scan_plan;
+ RelOptInfo *rel = best_path->path.parent;
+ Index scan_relid = rel->relid;
+ Oid rel_oid = InvalidOid;
+ Plan *outer_plan = NULL;
+
+ Assert(rel->fdwroutine != NULL);
+
+ /* transform the child path if any */
+ if (best_path->fdw_outerpath)
+ outer_plan = create_plan_recurse(root, best_path->fdw_outerpath,
+ CP_EXACT_TLIST);
+
+ /*
+ * If we're scanning a base relation, fetch its OID. (Irrelevant if
+ * scanning a join relation.)
+ */
+ if (scan_relid > 0)
+ {
+ RangeTblEntry *rte;
+
+ Assert(rel->rtekind == RTE_RELATION);
+ rte = planner_rt_fetch(scan_relid, root);
+ Assert(rte->rtekind == RTE_RELATION);
+ rel_oid = rte->relid;
+ }
+
+ /*
+ * Sort clauses into best execution order. We do this first since the FDW
+ * might have more info than we do and wish to adjust the ordering.
+ */
+ scan_clauses = order_qual_clauses(root, scan_clauses);
+
+ /*
+ * Let the FDW perform its processing on the restriction clauses and
+ * generate the plan node. Note that the FDW might remove restriction
+ * clauses that it intends to execute remotely, or even add more (if it
+ * has selected some join clauses for remote use but also wants them
+ * rechecked locally).
+ */
+ scan_plan = rel->fdwroutine->GetForeignPlan(root, rel, rel_oid,
+ best_path,
+ tlist, scan_clauses,
+ outer_plan);
+
+ /* Copy cost data from Path to Plan; no need to make FDW do this */
+ copy_generic_path_info(&scan_plan->scan.plan, &best_path->path);
+
+ /* Copy foreign server OID; likewise, no need to make FDW do this */
+ scan_plan->fs_server = rel->serverid;
+
+ /*
+ * Likewise, copy the relids that are represented by this foreign scan. An
+ * upper rel doesn't have relids set, but it covers all the base relations
+ * participating in the underlying scan, so use root's all_baserels.
+ */
+ if (rel->reloptkind == RELOPT_UPPER_REL)
+ scan_plan->fs_relids = root->all_baserels;
+ else
+ scan_plan->fs_relids = best_path->path.parent->relids;
+
+ /*
+ * If this is a foreign join, and to make it valid to push down we had to
+ * assume that the current user is the same as some user explicitly named
+ * in the query, mark the finished plan as depending on the current user.
+ */
+ if (rel->useridiscurrent)
+ root->glob->dependsOnRole = true;
+
+ /*
+ * Replace any outer-relation variables with nestloop params in the qual,
+ * fdw_exprs and fdw_recheck_quals expressions. We do this last so that
+ * the FDW doesn't have to be involved. (Note that parts of fdw_exprs or
+ * fdw_recheck_quals could have come from join clauses, so doing this
+ * beforehand on the scan_clauses wouldn't work.) We assume
+ * fdw_scan_tlist contains no such variables.
+ */
+ if (best_path->path.param_info)
+ {
+ scan_plan->scan.plan.qual = (List *)
+ replace_nestloop_params(root, (Node *) scan_plan->scan.plan.qual);
+ scan_plan->fdw_exprs = (List *)
+ replace_nestloop_params(root, (Node *) scan_plan->fdw_exprs);
+ scan_plan->fdw_recheck_quals = (List *)
+ replace_nestloop_params(root,
+ (Node *) scan_plan->fdw_recheck_quals);
+ }
+
+ /*
+ * If rel is a base relation, detect whether any system columns are
+ * requested from the rel. (If rel is a join relation, rel->relid will be
+ * 0, but there can be no Var with relid 0 in the rel's targetlist or the
+ * restriction clauses, so we skip this in that case. Note that any such
+ * columns in base relations that were joined are assumed to be contained
+ * in fdw_scan_tlist.) This is a bit of a kluge and might go away
+ * someday, so we intentionally leave it out of the API presented to FDWs.
+ */
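+	/*
+	 * For example, a query that fetches ctid or xmin from the foreign table
+	 * leaves a Var with a negative varattno in the targetlist or quals; the
+	 * loop over negative attribute numbers below notices it and sets
+	 * fsSystemCol.
+	 */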
+ scan_plan->fsSystemCol = false;
+ if (scan_relid > 0)
+ {
+ Bitmapset *attrs_used = NULL;
+ ListCell *lc;
+ int i;
+
+ /*
+ * First, examine all the attributes needed for joins or final output.
+ * Note: we must look at rel's targetlist, not the attr_needed data,
+ * because attr_needed isn't computed for inheritance child rels.
+ */
+ pull_varattnos((Node *) rel->reltarget->exprs, scan_relid, &attrs_used);
+
+ /* Add all the attributes used by restriction clauses. */
+ foreach(lc, rel->baserestrictinfo)
+ {
+ RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
+
+ pull_varattnos((Node *) rinfo->clause, scan_relid, &attrs_used);
+ }
+
+ /* Now, are any system columns requested from rel? */
+ for (i = FirstLowInvalidHeapAttributeNumber + 1; i < 0; i++)
+ {
+ if (bms_is_member(i - FirstLowInvalidHeapAttributeNumber, attrs_used))
+ {
+ scan_plan->fsSystemCol = true;
+ break;
+ }
+ }
+
+ bms_free(attrs_used);
+ }
+
+ return scan_plan;
+}
+
+/*
+ * create_customscan_plan
+ *
+ * Transform a CustomPath into a Plan.
+ */
+static CustomScan *
+create_customscan_plan(PlannerInfo *root, CustomPath *best_path,
+ List *tlist, List *scan_clauses)
+{
+ CustomScan *cplan;
+ RelOptInfo *rel = best_path->path.parent;
+ List *custom_plans = NIL;
+ ListCell *lc;
+
+ /* Recursively transform child paths. */
+ foreach(lc, best_path->custom_paths)
+ {
+ Plan *plan = create_plan_recurse(root, (Path *) lfirst(lc),
+ CP_EXACT_TLIST);
+
+ custom_plans = lappend(custom_plans, plan);
+ }
+
+ /*
+	 * Sort clauses into the best execution order, although the custom-scan
+	 * provider can reorder them again.
+ */
+ scan_clauses = order_qual_clauses(root, scan_clauses);
+
+ /*
+ * Invoke custom plan provider to create the Plan node represented by the
+ * CustomPath.
+ */
+ cplan = castNode(CustomScan,
+ best_path->methods->PlanCustomPath(root,
+ rel,
+ best_path,
+ tlist,
+ scan_clauses,
+ custom_plans));
+
+ /*
+ * Copy cost data from Path to Plan; no need to make custom-plan providers
+ * do this
+ */
+ copy_generic_path_info(&cplan->scan.plan, &best_path->path);
+
+ /* Likewise, copy the relids that are represented by this custom scan */
+ cplan->custom_relids = best_path->path.parent->relids;
+
+ /*
+ * Replace any outer-relation variables with nestloop params in the qual
+ * and custom_exprs expressions. We do this last so that the custom-plan
+ * provider doesn't have to be involved. (Note that parts of custom_exprs
+ * could have come from join clauses, so doing this beforehand on the
+ * scan_clauses wouldn't work.) We assume custom_scan_tlist contains no
+ * such variables.
+ */
+ if (best_path->path.param_info)
+ {
+ cplan->scan.plan.qual = (List *)
+ replace_nestloop_params(root, (Node *) cplan->scan.plan.qual);
+ cplan->custom_exprs = (List *)
+ replace_nestloop_params(root, (Node *) cplan->custom_exprs);
+ }
+
+ return cplan;
+}
+
+
+/*****************************************************************************
+ *
+ * JOIN METHODS
+ *
+ *****************************************************************************/
+
+static NestLoop *
+create_nestloop_plan(PlannerInfo *root,
+ NestPath *best_path)
+{
+ NestLoop *join_plan;
+ Plan *outer_plan;
+ Plan *inner_plan;
+ List *tlist = build_path_tlist(root, &best_path->jpath.path);
+ List *joinrestrictclauses = best_path->jpath.joinrestrictinfo;
+ List *joinclauses;
+ List *otherclauses;
+ Relids outerrelids;
+ List *nestParams;
+ Relids saveOuterRels = root->curOuterRels;
+
+ /* NestLoop can project, so no need to be picky about child tlists */
+ outer_plan = create_plan_recurse(root, best_path->jpath.outerjoinpath, 0);
+
+ /* For a nestloop, include outer relids in curOuterRels for inner side */
+ root->curOuterRels = bms_union(root->curOuterRels,
+ best_path->jpath.outerjoinpath->parent->relids);
+
+ inner_plan = create_plan_recurse(root, best_path->jpath.innerjoinpath, 0);
+
+ /* Restore curOuterRels */
+ bms_free(root->curOuterRels);
+ root->curOuterRels = saveOuterRels;
+
+ /* Sort join qual clauses into best execution order */
+ joinrestrictclauses = order_qual_clauses(root, joinrestrictclauses);
+
+ /* Get the join qual clauses (in plain expression form) */
+ /* Any pseudoconstant clauses are ignored here */
+ if (IS_OUTER_JOIN(best_path->jpath.jointype))
+ {
+ extract_actual_join_clauses(joinrestrictclauses,
+ best_path->jpath.path.parent->relids,
+ &joinclauses, &otherclauses);
+ }
+ else
+ {
+ /* We can treat all clauses alike for an inner join */
+ joinclauses = extract_actual_clauses(joinrestrictclauses, false);
+ otherclauses = NIL;
+ }
+
+ /* Replace any outer-relation variables with nestloop params */
+ if (best_path->jpath.path.param_info)
+ {
+ joinclauses = (List *)
+ replace_nestloop_params(root, (Node *) joinclauses);
+ otherclauses = (List *)
+ replace_nestloop_params(root, (Node *) otherclauses);
+ }
+
+ /*
+ * Identify any nestloop parameters that should be supplied by this join
+ * node, and remove them from root->curOuterParams.
+ */
+ outerrelids = best_path->jpath.outerjoinpath->parent->relids;
+ nestParams = identify_current_nestloop_params(root, outerrelids);
+
+ join_plan = make_nestloop(tlist,
+ joinclauses,
+ otherclauses,
+ nestParams,
+ outer_plan,
+ inner_plan,
+ best_path->jpath.jointype,
+ best_path->jpath.inner_unique);
+
+ copy_generic_path_info(&join_plan->join.plan, &best_path->jpath.path);
+
+ return join_plan;
+}
+
+static MergeJoin *
+create_mergejoin_plan(PlannerInfo *root,
+ MergePath *best_path)
+{
+ MergeJoin *join_plan;
+ Plan *outer_plan;
+ Plan *inner_plan;
+ List *tlist = build_path_tlist(root, &best_path->jpath.path);
+ List *joinclauses;
+ List *otherclauses;
+ List *mergeclauses;
+ List *outerpathkeys;
+ List *innerpathkeys;
+ int nClauses;
+ Oid *mergefamilies;
+ Oid *mergecollations;
+ int *mergestrategies;
+ bool *mergenullsfirst;
+ PathKey *opathkey;
+ EquivalenceClass *opeclass;
+ int i;
+ ListCell *lc;
+ ListCell *lop;
+ ListCell *lip;
+ Path *outer_path = best_path->jpath.outerjoinpath;
+ Path *inner_path = best_path->jpath.innerjoinpath;
+
+ /*
+ * MergeJoin can project, so we don't have to demand exact tlists from the
+ * inputs. However, if we're intending to sort an input's result, it's
+ * best to request a small tlist so we aren't sorting more data than
+ * necessary.
+ */
+ outer_plan = create_plan_recurse(root, best_path->jpath.outerjoinpath,
+ (best_path->outersortkeys != NIL) ? CP_SMALL_TLIST : 0);
+
+ inner_plan = create_plan_recurse(root, best_path->jpath.innerjoinpath,
+ (best_path->innersortkeys != NIL) ? CP_SMALL_TLIST : 0);
+
+ /* Sort join qual clauses into best execution order */
+ /* NB: do NOT reorder the mergeclauses */
+ joinclauses = order_qual_clauses(root, best_path->jpath.joinrestrictinfo);
+
+ /* Get the join qual clauses (in plain expression form) */
+ /* Any pseudoconstant clauses are ignored here */
+ if (IS_OUTER_JOIN(best_path->jpath.jointype))
+ {
+ extract_actual_join_clauses(joinclauses,
+ best_path->jpath.path.parent->relids,
+ &joinclauses, &otherclauses);
+ }
+ else
+ {
+ /* We can treat all clauses alike for an inner join */
+ joinclauses = extract_actual_clauses(joinclauses, false);
+ otherclauses = NIL;
+ }
+
+ /*
+ * Remove the mergeclauses from the list of join qual clauses, leaving the
+ * list of quals that must be checked as qpquals.
+ */
+ mergeclauses = get_actual_clauses(best_path->path_mergeclauses);
+ joinclauses = list_difference(joinclauses, mergeclauses);
+
+ /*
+ * Replace any outer-relation variables with nestloop params. There
+ * should not be any in the mergeclauses.
+ */
+ if (best_path->jpath.path.param_info)
+ {
+ joinclauses = (List *)
+ replace_nestloop_params(root, (Node *) joinclauses);
+ otherclauses = (List *)
+ replace_nestloop_params(root, (Node *) otherclauses);
+ }
+
+ /*
+ * Rearrange mergeclauses, if needed, so that the outer variable is always
+ * on the left; mark the mergeclause restrictinfos with correct
+ * outer_is_left status.
+ */
+ mergeclauses = get_switched_clauses(best_path->path_mergeclauses,
+ best_path->jpath.outerjoinpath->parent->relids);
+
+ /*
+ * Create explicit sort nodes for the outer and inner paths if necessary.
+ */
+ if (best_path->outersortkeys)
+ {
+ Relids outer_relids = outer_path->parent->relids;
+ Sort *sort = make_sort_from_pathkeys(outer_plan,
+ best_path->outersortkeys,
+ outer_relids);
+
+ label_sort_with_costsize(root, sort, -1.0);
+ outer_plan = (Plan *) sort;
+ outerpathkeys = best_path->outersortkeys;
+ }
+ else
+ outerpathkeys = best_path->jpath.outerjoinpath->pathkeys;
+
+ if (best_path->innersortkeys)
+ {
+ Relids inner_relids = inner_path->parent->relids;
+ Sort *sort = make_sort_from_pathkeys(inner_plan,
+ best_path->innersortkeys,
+ inner_relids);
+
+ label_sort_with_costsize(root, sort, -1.0);
+ inner_plan = (Plan *) sort;
+ innerpathkeys = best_path->innersortkeys;
+ }
+ else
+ innerpathkeys = best_path->jpath.innerjoinpath->pathkeys;
+
+ /*
+ * If specified, add a materialize node to shield the inner plan from the
+ * need to handle mark/restore.
+ */
+ if (best_path->materialize_inner)
+ {
+ Plan *matplan = (Plan *) make_material(inner_plan);
+
+ /*
+ * We assume the materialize will not spill to disk, and therefore
+ * charge just cpu_operator_cost per tuple. (Keep this estimate in
+ * sync with final_cost_mergejoin.)
+ */
+ copy_plan_costsize(matplan, inner_plan);
+ matplan->total_cost += cpu_operator_cost * matplan->plan_rows;
+
+ inner_plan = matplan;
+ }
+
+ /*
+ * Compute the opfamily/collation/strategy/nullsfirst arrays needed by the
+ * executor. The information is in the pathkeys for the two inputs, but
+ * we need to be careful about the possibility of mergeclauses sharing a
+ * pathkey, as well as the possibility that the inner pathkeys are not in
+ * an order matching the mergeclauses.
+ */
+ nClauses = list_length(mergeclauses);
+ Assert(nClauses == list_length(best_path->path_mergeclauses));
+ mergefamilies = (Oid *) palloc(nClauses * sizeof(Oid));
+ mergecollations = (Oid *) palloc(nClauses * sizeof(Oid));
+ mergestrategies = (int *) palloc(nClauses * sizeof(int));
+ mergenullsfirst = (bool *) palloc(nClauses * sizeof(bool));
+
+ opathkey = NULL;
+ opeclass = NULL;
+ lop = list_head(outerpathkeys);
+ lip = list_head(innerpathkeys);
+ i = 0;
+ foreach(lc, best_path->path_mergeclauses)
+ {
+ RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc);
+ EquivalenceClass *oeclass;
+ EquivalenceClass *ieclass;
+ PathKey *ipathkey = NULL;
+ EquivalenceClass *ipeclass = NULL;
+ bool first_inner_match = false;
+
+ /* fetch outer/inner eclass from mergeclause */
+ if (rinfo->outer_is_left)
+ {
+ oeclass = rinfo->left_ec;
+ ieclass = rinfo->right_ec;
+ }
+ else
+ {
+ oeclass = rinfo->right_ec;
+ ieclass = rinfo->left_ec;
+ }
+ Assert(oeclass != NULL);
+ Assert(ieclass != NULL);
+
+ /*
+ * We must identify the pathkey elements associated with this clause
+ * by matching the eclasses (which should give a unique match, since
+ * the pathkey lists should be canonical). In typical cases the merge
+ * clauses are one-to-one with the pathkeys, but when dealing with
+ * partially redundant query conditions, things are more complicated.
+ *
+ * lop and lip reference the first as-yet-unmatched pathkey elements.
+ * If they're NULL then all pathkey elements have been matched.
+ *
+ * The ordering of the outer pathkeys should match the mergeclauses,
+ * by construction (see find_mergeclauses_for_outer_pathkeys()). There
+ * could be more than one mergeclause for the same outer pathkey, but
+ * no pathkey may be entirely skipped over.
+ */
+ if (oeclass != opeclass) /* multiple matches are not interesting */
+ {
+ /* doesn't match the current opathkey, so must match the next */
+ if (lop == NULL)
+ elog(ERROR, "outer pathkeys do not match mergeclauses");
+ opathkey = (PathKey *) lfirst(lop);
+ opeclass = opathkey->pk_eclass;
+ lop = lnext(outerpathkeys, lop);
+ if (oeclass != opeclass)
+ elog(ERROR, "outer pathkeys do not match mergeclauses");
+ }
+
+ /*
+ * The inner pathkeys likewise should not have skipped-over keys, but
+ * it's possible for a mergeclause to reference some earlier inner
+ * pathkey if we had redundant pathkeys. For example we might have
+ * mergeclauses like "o.a = i.x AND o.b = i.y AND o.c = i.x". The
+ * implied inner ordering is then "ORDER BY x, y, x", but the pathkey
+ * mechanism drops the second sort by x as redundant, and this code
+ * must cope.
+ *
+ * It's also possible for the implied inner-rel ordering to be like
+ * "ORDER BY x, y, x DESC". We still drop the second instance of x as
+ * redundant; but this means that the sort ordering of a redundant
+ * inner pathkey should not be considered significant. So we must
+ * detect whether this is the first clause matching an inner pathkey.
+ */
+ if (lip)
+ {
+ ipathkey = (PathKey *) lfirst(lip);
+ ipeclass = ipathkey->pk_eclass;
+ if (ieclass == ipeclass)
+ {
+ /* successful first match to this inner pathkey */
+ lip = lnext(innerpathkeys, lip);
+ first_inner_match = true;
+ }
+ }
+ if (!first_inner_match)
+ {
+ /* redundant clause ... must match something before lip */
+ ListCell *l2;
+
+ foreach(l2, innerpathkeys)
+ {
+ if (l2 == lip)
+ break;
+ ipathkey = (PathKey *) lfirst(l2);
+ ipeclass = ipathkey->pk_eclass;
+ if (ieclass == ipeclass)
+ break;
+ }
+ if (ieclass != ipeclass)
+ elog(ERROR, "inner pathkeys do not match mergeclauses");
+ }
+
+ /*
+ * The pathkeys should always match each other as to opfamily and
+ * collation (which affect equality), but if we're considering a
+ * redundant inner pathkey, its sort ordering might not match. In
+ * such cases we may ignore the inner pathkey's sort ordering and use
+ * the outer's. (In effect, we're lying to the executor about the
+ * sort direction of this inner column, but it does not matter since
+ * the run-time row comparisons would only reach this column when
+ * there's equality for the earlier column containing the same eclass.
+ * There could be only one value in this column for the range of inner
+ * rows having a given value in the earlier column, so it does not
+ * matter which way we imagine this column to be ordered.) But a
+ * non-redundant inner pathkey had better match outer's ordering too.
+ */
+ if (opathkey->pk_opfamily != ipathkey->pk_opfamily ||
+ opathkey->pk_eclass->ec_collation != ipathkey->pk_eclass->ec_collation)
+ elog(ERROR, "left and right pathkeys do not match in mergejoin");
+ if (first_inner_match &&
+ (opathkey->pk_strategy != ipathkey->pk_strategy ||
+ opathkey->pk_nulls_first != ipathkey->pk_nulls_first))
+ elog(ERROR, "left and right pathkeys do not match in mergejoin");
+
+ /* OK, save info for executor */
+ mergefamilies[i] = opathkey->pk_opfamily;
+ mergecollations[i] = opathkey->pk_eclass->ec_collation;
+ mergestrategies[i] = opathkey->pk_strategy;
+ mergenullsfirst[i] = opathkey->pk_nulls_first;
+ i++;
+ }
+
+ /*
+ * Note: it is not an error if we have additional pathkey elements (i.e.,
+ * lop or lip isn't NULL here). The input paths might be better-sorted
+ * than we need for the current mergejoin.
+ */
+
+ /*
+ * Now we can build the mergejoin node.
+ */
+ join_plan = make_mergejoin(tlist,
+ joinclauses,
+ otherclauses,
+ mergeclauses,
+ mergefamilies,
+ mergecollations,
+ mergestrategies,
+ mergenullsfirst,
+ outer_plan,
+ inner_plan,
+ best_path->jpath.jointype,
+ best_path->jpath.inner_unique,
+ best_path->skip_mark_restore);
+
+ /* Costs of sort and material steps are included in path cost already */
+ copy_generic_path_info(&join_plan->join.plan, &best_path->jpath.path);
+
+ return join_plan;
+}
+
+static HashJoin *
+create_hashjoin_plan(PlannerInfo *root,
+ HashPath *best_path)
+{
+ HashJoin *join_plan;
+ Hash *hash_plan;
+ Plan *outer_plan;
+ Plan *inner_plan;
+ List *tlist = build_path_tlist(root, &best_path->jpath.path);
+ List *joinclauses;
+ List *otherclauses;
+ List *hashclauses;
+ List *hashoperators = NIL;
+ List *hashcollations = NIL;
+ List *inner_hashkeys = NIL;
+ List *outer_hashkeys = NIL;
+ Oid skewTable = InvalidOid;
+ AttrNumber skewColumn = InvalidAttrNumber;
+ bool skewInherit = false;
+ ListCell *lc;
+
+ /*
+ * HashJoin can project, so we don't have to demand exact tlists from the
+ * inputs. However, it's best to request a small tlist from the inner
+ * side, so that we aren't storing more data than necessary. Likewise, if
+ * we anticipate batching, request a small tlist from the outer side so
+ * that we don't put extra data in the outer batch files.
+ */
+ outer_plan = create_plan_recurse(root, best_path->jpath.outerjoinpath,
+ (best_path->num_batches > 1) ? CP_SMALL_TLIST : 0);
+
+ inner_plan = create_plan_recurse(root, best_path->jpath.innerjoinpath,
+ CP_SMALL_TLIST);
+
+ /* Sort join qual clauses into best execution order */
+ joinclauses = order_qual_clauses(root, best_path->jpath.joinrestrictinfo);
+ /* There's no point in sorting the hash clauses ... */
+
+ /* Get the join qual clauses (in plain expression form) */
+ /* Any pseudoconstant clauses are ignored here */
+ if (IS_OUTER_JOIN(best_path->jpath.jointype))
+ {
+ extract_actual_join_clauses(joinclauses,
+ best_path->jpath.path.parent->relids,
+ &joinclauses, &otherclauses);
+ }
+ else
+ {
+ /* We can treat all clauses alike for an inner join */
+ joinclauses = extract_actual_clauses(joinclauses, false);
+ otherclauses = NIL;
+ }
+
+ /*
+ * Remove the hashclauses from the list of join qual clauses, leaving the
+ * list of quals that must be checked as qpquals.
+ */
+ hashclauses = get_actual_clauses(best_path->path_hashclauses);
+ joinclauses = list_difference(joinclauses, hashclauses);
+
+ /*
+ * Replace any outer-relation variables with nestloop params. There
+ * should not be any in the hashclauses.
+ */
+ if (best_path->jpath.path.param_info)
+ {
+ joinclauses = (List *)
+ replace_nestloop_params(root, (Node *) joinclauses);
+ otherclauses = (List *)
+ replace_nestloop_params(root, (Node *) otherclauses);
+ }
+
+ /*
+ * Rearrange hashclauses, if needed, so that the outer variable is always
+ * on the left.
+ */
+ hashclauses = get_switched_clauses(best_path->path_hashclauses,
+ best_path->jpath.outerjoinpath->parent->relids);
+
+ /*
+ * If there is a single join clause and we can identify the outer variable
+ * as a simple column reference, supply its identity for possible use in
+ * skew optimization. (Note: in principle we could do skew optimization
+ * with multiple join clauses, but we'd have to be able to determine the
+ * most common combinations of outer values, which we don't currently have
+ * enough stats for.)
+ */
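+	/*
+	 * For instance (hypothetical query), in "big JOIN small ON
+	 * big.customer_id = small.id" the outer variable big.customer_id is a
+	 * plain column reference, so skewTable/skewColumn identify it and the
+	 * executor can give preferential in-memory treatment to its most
+	 * common values.
+	 */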
+ if (list_length(hashclauses) == 1)
+ {
+ OpExpr *clause = (OpExpr *) linitial(hashclauses);
+ Node *node;
+
+ Assert(is_opclause(clause));
+ node = (Node *) linitial(clause->args);
+ if (IsA(node, RelabelType))
+ node = (Node *) ((RelabelType *) node)->arg;
+ if (IsA(node, Var))
+ {
+ Var *var = (Var *) node;
+ RangeTblEntry *rte;
+
+ rte = root->simple_rte_array[var->varno];
+ if (rte->rtekind == RTE_RELATION)
+ {
+ skewTable = rte->relid;
+ skewColumn = var->varattno;
+ skewInherit = rte->inh;
+ }
+ }
+ }
+
+ /*
+ * Collect hash related information. The hashed expressions are
+ * deconstructed into outer/inner expressions, so they can be computed
+ * separately (inner expressions are used to build the hashtable via Hash,
+ * outer expressions to perform lookups of tuples from HashJoin's outer
+ * plan in the hashtable). Also collect operator information necessary to
+ * build the hashtable.
+ */
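+	/*
+	 * For a hashclause such as "outer.a = inner.b" (outer var on the left
+	 * after get_switched_clauses above), the loop records the operator OID
+	 * and input collation, and puts outer.a into outer_hashkeys and inner.b
+	 * into inner_hashkeys.
+	 */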
+ foreach(lc, hashclauses)
+ {
+ OpExpr *hclause = lfirst_node(OpExpr, lc);
+
+ hashoperators = lappend_oid(hashoperators, hclause->opno);
+ hashcollations = lappend_oid(hashcollations, hclause->inputcollid);
+ outer_hashkeys = lappend(outer_hashkeys, linitial(hclause->args));
+ inner_hashkeys = lappend(inner_hashkeys, lsecond(hclause->args));
+ }
+
+ /*
+ * Build the hash node and hash join node.
+ */
+ hash_plan = make_hash(inner_plan,
+ inner_hashkeys,
+ skewTable,
+ skewColumn,
+ skewInherit);
+
+ /*
+ * Set Hash node's startup & total costs equal to total cost of input
+	 * plan; this only affects EXPLAIN display, not planning decisions.
+ */
+ copy_plan_costsize(&hash_plan->plan, inner_plan);
+ hash_plan->plan.startup_cost = hash_plan->plan.total_cost;
+
+ /*
+ * If parallel-aware, the executor will also need an estimate of the total
+ * number of rows expected from all participants so that it can size the
+ * shared hash table.
+ */
+ if (best_path->jpath.path.parallel_aware)
+ {
+ hash_plan->plan.parallel_aware = true;
+ hash_plan->rows_total = best_path->inner_rows_total;
+ }
+
+ join_plan = make_hashjoin(tlist,
+ joinclauses,
+ otherclauses,
+ hashclauses,
+ hashoperators,
+ hashcollations,
+ outer_hashkeys,
+ outer_plan,
+ (Plan *) hash_plan,
+ best_path->jpath.jointype,
+ best_path->jpath.inner_unique);
+
+ copy_generic_path_info(&join_plan->join.plan, &best_path->jpath.path);
+
+ return join_plan;
+}
+
+
+/*****************************************************************************
+ *
+ * SUPPORTING ROUTINES
+ *
+ *****************************************************************************/
+
+/*
+ * replace_nestloop_params
+ * Replace outer-relation Vars and PlaceHolderVars in the given expression
+ * with nestloop Params
+ *
+ * All Vars and PlaceHolderVars belonging to the relation(s) identified by
+ * root->curOuterRels are replaced by Params, and entries are added to
+ * root->curOuterParams if not already present.
+ */
+static Node *
+replace_nestloop_params(PlannerInfo *root, Node *expr)
+{
+ /* No setup needed for tree walk, so away we go */
+ return replace_nestloop_params_mutator(expr, root);
+}
+
+static Node *
+replace_nestloop_params_mutator(Node *node, PlannerInfo *root)
+{
+ if (node == NULL)
+ return NULL;
+ if (IsA(node, Var))
+ {
+ Var *var = (Var *) node;
+
+ /* Upper-level Vars should be long gone at this point */
+ Assert(var->varlevelsup == 0);
+ /* If not to be replaced, we can just return the Var unmodified */
+ if (IS_SPECIAL_VARNO(var->varno) ||
+ !bms_is_member(var->varno, root->curOuterRels))
+ return node;
+ /* Replace the Var with a nestloop Param */
+ return (Node *) replace_nestloop_param_var(root, var);
+ }
+ if (IsA(node, PlaceHolderVar))
+ {
+ PlaceHolderVar *phv = (PlaceHolderVar *) node;
+
+ /* Upper-level PlaceHolderVars should be long gone at this point */
+ Assert(phv->phlevelsup == 0);
+
+ /*
+ * Check whether we need to replace the PHV. We use bms_overlap as a
+ * cheap/quick test to see if the PHV might be evaluated in the outer
+ * rels, and then grab its PlaceHolderInfo to tell for sure.
+ */
+ if (!bms_overlap(phv->phrels, root->curOuterRels) ||
+ !bms_is_subset(find_placeholder_info(root, phv, false)->ph_eval_at,
+ root->curOuterRels))
+ {
+ /*
+ * We can't replace the whole PHV, but we might still need to
+ * replace Vars or PHVs within its expression, in case it ends up
+ * actually getting evaluated here. (It might get evaluated in
+ * this plan node, or some child node; in the latter case we don't
+ * really need to process the expression here, but we haven't got
+ * enough info to tell if that's the case.) Flat-copy the PHV
+ * node and then recurse on its expression.
+ *
+ * Note that after doing this, we might have different
+ * representations of the contents of the same PHV in different
+ * parts of the plan tree. This is OK because equal() will just
+ * match on phid/phlevelsup, so setrefs.c will still recognize an
+ * upper-level reference to a lower-level copy of the same PHV.
+ */
+ PlaceHolderVar *newphv = makeNode(PlaceHolderVar);
+
+ memcpy(newphv, phv, sizeof(PlaceHolderVar));
+ newphv->phexpr = (Expr *)
+ replace_nestloop_params_mutator((Node *) phv->phexpr,
+ root);
+ return (Node *) newphv;
+ }
+ /* Replace the PlaceHolderVar with a nestloop Param */
+ return (Node *) replace_nestloop_param_placeholdervar(root, phv);
+ }
+ return expression_tree_mutator(node,
+ replace_nestloop_params_mutator,
+ (void *) root);
+}
+
+/*
+ * fix_indexqual_references
+ * Adjust indexqual clauses to the form the executor's indexqual
+ * machinery needs.
+ *
+ * We have three tasks here:
+ * * Select the actual qual clauses out of the input IndexClause list,
+ * and remove RestrictInfo nodes from the qual clauses.
+ * * Replace any outer-relation Var or PHV nodes with nestloop Params.
+ * (XXX eventually, that responsibility should go elsewhere?)
+ * * Index keys must be represented by Var nodes with varattno set to the
+ * index's attribute number, not the attribute number in the original rel.
+ *
+ * *stripped_indexquals_p receives a list of the actual qual clauses.
+ *
+ * *fixed_indexquals_p receives a list of the adjusted quals. This is a copy
+ * that shares no substructure with the original; this is needed in case there
+ * are subplans in it (we need two separate copies of the subplan tree, or
+ * things will go awry).
+ */
+static void
+fix_indexqual_references(PlannerInfo *root, IndexPath *index_path,
+ List **stripped_indexquals_p, List **fixed_indexquals_p)
+{
+ IndexOptInfo *index = index_path->indexinfo;
+ List *stripped_indexquals;
+ List *fixed_indexquals;
+ ListCell *lc;
+
+ stripped_indexquals = fixed_indexquals = NIL;
+
+ foreach(lc, index_path->indexclauses)
+ {
+ IndexClause *iclause = lfirst_node(IndexClause, lc);
+ int indexcol = iclause->indexcol;
+ ListCell *lc2;
+
+ foreach(lc2, iclause->indexquals)
+ {
+ RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc2);
+ Node *clause = (Node *) rinfo->clause;
+
+ stripped_indexquals = lappend(stripped_indexquals, clause);
+ clause = fix_indexqual_clause(root, index, indexcol,
+ clause, iclause->indexcols);
+ fixed_indexquals = lappend(fixed_indexquals, clause);
+ }
+ }
+
+ *stripped_indexquals_p = stripped_indexquals;
+ *fixed_indexquals_p = fixed_indexquals;
+}
+
+/*
+ * fix_indexorderby_references
+ * Adjust indexorderby clauses to the form the executor's index
+ * machinery needs.
+ *
+ * This is a simplified version of fix_indexqual_references. The input is
+ * bare clauses and a separate indexcol list, instead of IndexClauses.
+ */
+static List *
+fix_indexorderby_references(PlannerInfo *root, IndexPath *index_path)
+{
+ IndexOptInfo *index = index_path->indexinfo;
+ List *fixed_indexorderbys;
+ ListCell *lcc,
+ *lci;
+
+ fixed_indexorderbys = NIL;
+
+ forboth(lcc, index_path->indexorderbys, lci, index_path->indexorderbycols)
+ {
+ Node *clause = (Node *) lfirst(lcc);
+ int indexcol = lfirst_int(lci);
+
+ clause = fix_indexqual_clause(root, index, indexcol, clause, NIL);
+ fixed_indexorderbys = lappend(fixed_indexorderbys, clause);
+ }
+
+ return fixed_indexorderbys;
+}
+
+/*
+ * fix_indexqual_clause
+ * Convert a single indexqual clause to the form needed by the executor.
+ *
+ * We replace nestloop params here, and replace the index key variables
+ * or expressions by index Var nodes.
+ */
+static Node *
+fix_indexqual_clause(PlannerInfo *root, IndexOptInfo *index, int indexcol,
+ Node *clause, List *indexcolnos)
+{
+ /*
+ * Replace any outer-relation variables with nestloop params.
+ *
+ * This also makes a copy of the clause, so it's safe to modify it
+ * in-place below.
+ */
+ clause = replace_nestloop_params(root, clause);
+
+ if (IsA(clause, OpExpr))
+ {
+ OpExpr *op = (OpExpr *) clause;
+
+ /* Replace the indexkey expression with an index Var. */
+ linitial(op->args) = fix_indexqual_operand(linitial(op->args),
+ index,
+ indexcol);
+ }
+ else if (IsA(clause, RowCompareExpr))
+ {
+ RowCompareExpr *rc = (RowCompareExpr *) clause;
+ ListCell *lca,
+ *lcai;
+
+ /* Replace the indexkey expressions with index Vars. */
+ Assert(list_length(rc->largs) == list_length(indexcolnos));
+ forboth(lca, rc->largs, lcai, indexcolnos)
+ {
+ lfirst(lca) = fix_indexqual_operand(lfirst(lca),
+ index,
+ lfirst_int(lcai));
+ }
+ }
+ else if (IsA(clause, ScalarArrayOpExpr))
+ {
+ ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) clause;
+
+ /* Replace the indexkey expression with an index Var. */
+ linitial(saop->args) = fix_indexqual_operand(linitial(saop->args),
+ index,
+ indexcol);
+ }
+ else if (IsA(clause, NullTest))
+ {
+ NullTest *nt = (NullTest *) clause;
+
+ /* Replace the indexkey expression with an index Var. */
+ nt->arg = (Expr *) fix_indexqual_operand((Node *) nt->arg,
+ index,
+ indexcol);
+ }
+ else
+ elog(ERROR, "unsupported indexqual type: %d",
+ (int) nodeTag(clause));
+
+ return clause;
+}
+
+/*
+ * fix_indexqual_operand
+ * Convert an indexqual expression to a Var referencing the index column.
+ *
+ * We represent index keys by Var nodes having varno == INDEX_VAR and varattno
+ * equal to the index's attribute number (index column position).
+ *
+ * Most of the code here is just for sanity cross-checking that the given
+ * expression actually matches the index column it's claimed to.
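+ *
+ * For example, an indexqual whose key is heap column "x" of an index on
+ * (x, y) has that key rewritten as a Var with varno == INDEX_VAR and
+ * varattno = 1, regardless of x's attribute number in the underlying rel.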
+ */
+static Node *
+fix_indexqual_operand(Node *node, IndexOptInfo *index, int indexcol)
+{
+ Var *result;
+ int pos;
+ ListCell *indexpr_item;
+
+ /*
+ * Remove any binary-compatible relabeling of the indexkey
+ */
+ if (IsA(node, RelabelType))
+ node = (Node *) ((RelabelType *) node)->arg;
+
+ Assert(indexcol >= 0 && indexcol < index->ncolumns);
+
+ if (index->indexkeys[indexcol] != 0)
+ {
+ /* It's a simple index column */
+ if (IsA(node, Var) &&
+ ((Var *) node)->varno == index->rel->relid &&
+ ((Var *) node)->varattno == index->indexkeys[indexcol])
+ {
+ result = (Var *) copyObject(node);
+ result->varno = INDEX_VAR;
+ result->varattno = indexcol + 1;
+ return (Node *) result;
+ }
+ else
+ elog(ERROR, "index key does not match expected index column");
+ }
+
+ /* It's an index expression, so find and cross-check the expression */
+ indexpr_item = list_head(index->indexprs);
+ for (pos = 0; pos < index->ncolumns; pos++)
+ {
+ if (index->indexkeys[pos] == 0)
+ {
+ if (indexpr_item == NULL)
+ elog(ERROR, "too few entries in indexprs list");
+ if (pos == indexcol)
+ {
+ Node *indexkey;
+
+ indexkey = (Node *) lfirst(indexpr_item);
+ if (indexkey && IsA(indexkey, RelabelType))
+ indexkey = (Node *) ((RelabelType *) indexkey)->arg;
+ if (equal(node, indexkey))
+ {
+ result = makeVar(INDEX_VAR, indexcol + 1,
+ exprType(lfirst(indexpr_item)), -1,
+ exprCollation(lfirst(indexpr_item)),
+ 0);
+ return (Node *) result;
+ }
+ else
+ elog(ERROR, "index key does not match expected index column");
+ }
+ indexpr_item = lnext(index->indexprs, indexpr_item);
+ }
+ }
+
+ /* Oops... */
+ elog(ERROR, "index key does not match expected index column");
+ return NULL; /* keep compiler quiet */
+}
+
+/*
+ * get_switched_clauses
+ * Given a list of merge or hash joinclauses (as RestrictInfo nodes),
+ * extract the bare clauses, and rearrange the elements within the
+ * clauses, if needed, so the outer join variable is on the left and
+ * the inner is on the right. The original clause data structure is not
+ * touched; a modified list is returned. We do, however, set the transient
+ * outer_is_left field in each RestrictInfo to show which side was which.
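+ *
+ * For example, with outer relation A and clause "b.y = a.x", the returned
+ * list gets a commuted copy "a.x = b.y", and outer_is_left is set to false
+ * to record that the outer variable was on the right in the original.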
+ */
+static List *
+get_switched_clauses(List *clauses, Relids outerrelids)
+{
+ List *t_list = NIL;
+ ListCell *l;
+
+ foreach(l, clauses)
+ {
+ RestrictInfo *restrictinfo = (RestrictInfo *) lfirst(l);
+ OpExpr *clause = (OpExpr *) restrictinfo->clause;
+
+ Assert(is_opclause(clause));
+ if (bms_is_subset(restrictinfo->right_relids, outerrelids))
+ {
+ /*
+ * Duplicate just enough of the structure to allow commuting the
+ * clause without changing the original list. Could use
+ * copyObject, but a complete deep copy is overkill.
+ */
+ OpExpr *temp = makeNode(OpExpr);
+
+ temp->opno = clause->opno;
+ temp->opfuncid = InvalidOid;
+ temp->opresulttype = clause->opresulttype;
+ temp->opretset = clause->opretset;
+ temp->opcollid = clause->opcollid;
+ temp->inputcollid = clause->inputcollid;
+ temp->args = list_copy(clause->args);
+ temp->location = clause->location;
+ /* Commute it --- note this modifies the temp node in-place. */
+ CommuteOpExpr(temp);
+ t_list = lappend(t_list, temp);
+ restrictinfo->outer_is_left = false;
+ }
+ else
+ {
+ Assert(bms_is_subset(restrictinfo->left_relids, outerrelids));
+ t_list = lappend(t_list, clause);
+ restrictinfo->outer_is_left = true;
+ }
+ }
+ return t_list;
+}
+
+/*
+ * order_qual_clauses
+ * Given a list of qual clauses that will all be evaluated at the same
+ * plan node, sort the list into the order we want to check the quals
+ * in at runtime.
+ *
+ * When security barrier quals are used in the query, we may have quals with
+ * different security levels in the list. Quals of lower security_level
+ * must go before quals of higher security_level, except that leakproof
+ * quals may be granted an exception and moved up. When security level
+ * doesn't force the decision, we prefer to order clauses by estimated
+ * execution cost, cheapest first.
+ *
+ * Ideally the order should be driven by a combination of execution cost and
+ * selectivity, but it's not immediately clear how to account for both,
+ * and given the uncertainty of the estimates the reliability of the decisions
+ * would be doubtful anyway. So we just order by security level then
+ * estimated per-tuple cost, being careful not to change the order when
+ * (as is often the case) the estimates are identical.
+ *
+ * Although this will work on either bare clauses or RestrictInfos, it's
+ * much faster to apply it to RestrictInfos, since it can re-use cost
+ * information that is cached in RestrictInfos. XXX in the bare-clause
+ * case, we are also not able to apply security considerations. That is
+ * all right for the moment, because the bare-clause case doesn't occur
+ * anywhere that barrier quals could be present, but it would be better to
+ * get rid of it.
+ *
+ * Note: some callers pass lists that contain entries that will later be
+ * removed; this is the easiest way to let this routine see RestrictInfos
+ * instead of bare clauses. This is another reason why trying to consider
+ * selectivity in the ordering would likely do the wrong thing.
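+ *
+ * For example, a leakproof qual of security_level 1 whose estimated cost is
+ * under 10 * cpu_operator_cost is treated as level 0 below, so it can be
+ * checked ahead of more expensive level-0 quals.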
+ */
+static List *
+order_qual_clauses(PlannerInfo *root, List *clauses)
+{
+ typedef struct
+ {
+ Node *clause;
+ Cost cost;
+ Index security_level;
+ } QualItem;
+ int nitems = list_length(clauses);
+ QualItem *items;
+ ListCell *lc;
+ int i;
+ List *result;
+
+ /* No need to work hard for 0 or 1 clause */
+ if (nitems <= 1)
+ return clauses;
+
+ /*
+ * Collect the items and costs into an array. This is to avoid repeated
+ * cost_qual_eval work if the inputs aren't RestrictInfos.
+ */
+ items = (QualItem *) palloc(nitems * sizeof(QualItem));
+ i = 0;
+ foreach(lc, clauses)
+ {
+ Node *clause = (Node *) lfirst(lc);
+ QualCost qcost;
+
+ cost_qual_eval_node(&qcost, clause, root);
+ items[i].clause = clause;
+ items[i].cost = qcost.per_tuple;
+ if (IsA(clause, RestrictInfo))
+ {
+ RestrictInfo *rinfo = (RestrictInfo *) clause;
+
+ /*
+ * If a clause is leakproof, it doesn't have to be constrained by
+ * its nominal security level. If it's also reasonably cheap
+ * (here defined as 10X cpu_operator_cost), pretend it has
+ * security_level 0, which will allow it to go in front of
+ * more-expensive quals of lower security levels. Of course, that
+ * will also force it to go in front of cheaper quals of its own
+ * security level, which is not so great, but we can alleviate
+ * that risk by applying the cost limit cutoff.
+ */
+ if (rinfo->leakproof && items[i].cost < 10 * cpu_operator_cost)
+ items[i].security_level = 0;
+ else
+ items[i].security_level = rinfo->security_level;
+ }
+ else
+ items[i].security_level = 0;
+ i++;
+ }
+
+ /*
+ * Sort. We don't use qsort() because it's not guaranteed stable for
+ * equal keys. The expected number of entries is small enough that a
+ * simple insertion sort should be good enough.
+ */
+ for (i = 1; i < nitems; i++)
+ {
+ QualItem newitem = items[i];
+ int j;
+
+ /* insert newitem into the already-sorted subarray */
+ for (j = i; j > 0; j--)
+ {
+ QualItem *olditem = &items[j - 1];
+
+ if (newitem.security_level > olditem->security_level ||
+ (newitem.security_level == olditem->security_level &&
+ newitem.cost >= olditem->cost))
+ break;
+ items[j] = *olditem;
+ }
+ items[j] = newitem;
+ }
+
+ /* Convert back to a list */
+ result = NIL;
+ for (i = 0; i < nitems; i++)
+ result = lappend(result, items[i].clause);
+
+ return result;
+}
+
+/*
+ * Copy cost and size info from a Path node to the Plan node created from it.
+ * The executor usually won't use this info, but it's needed by EXPLAIN.
+ * Also copy the parallel-related flags, which the executor *will* use.
+ */
+static void
+copy_generic_path_info(Plan *dest, Path *src)
+{
+ dest->startup_cost = src->startup_cost;
+ dest->total_cost = src->total_cost;
+ dest->plan_rows = src->rows;
+ dest->plan_width = src->pathtarget->width;
+ dest->parallel_aware = src->parallel_aware;
+ dest->parallel_safe = src->parallel_safe;
+}
+
+/*
+ * Copy cost and size info from a lower plan node to an inserted node.
+ * (Most callers alter the info after copying it.)
+ */
+static void
+copy_plan_costsize(Plan *dest, Plan *src)
+{
+ dest->startup_cost = src->startup_cost;
+ dest->total_cost = src->total_cost;
+ dest->plan_rows = src->plan_rows;
+ dest->plan_width = src->plan_width;
+ /* Assume the inserted node is not parallel-aware. */
+ dest->parallel_aware = false;
+ /* Assume the inserted node is parallel-safe, if child plan is. */
+ dest->parallel_safe = src->parallel_safe;
+}
+
+/*
+ * Some places in this file build Sort nodes that don't have a directly
+ * corresponding Path node. The cost of the sort is, or should have been,
+ * included in the cost of the Path node we're working from, but since it's
+ * not split out, we have to re-figure it using cost_sort(). This is just
+ * to label the Sort node nicely for EXPLAIN.
+ *
+ * limit_tuples is as for cost_sort (in particular, pass -1 if no limit)
+ */
+static void
+label_sort_with_costsize(PlannerInfo *root, Sort *plan, double limit_tuples)
+{
+ Plan *lefttree = plan->plan.lefttree;
+ Path sort_path; /* dummy for result of cost_sort */
+
+ /*
+ * This function shouldn't have to deal with IncrementalSort plans because
+ * they are only created from corresponding Path nodes.
+ */
+ Assert(IsA(plan, Sort));
+
+ cost_sort(&sort_path, root, NIL,
+ lefttree->total_cost,
+ lefttree->plan_rows,
+ lefttree->plan_width,
+ 0.0,
+ work_mem,
+ limit_tuples);
+ plan->plan.startup_cost = sort_path.startup_cost;
+ plan->plan.total_cost = sort_path.total_cost;
+ plan->plan.plan_rows = lefttree->plan_rows;
+ plan->plan.plan_width = lefttree->plan_width;
+ plan->plan.parallel_aware = false;
+ plan->plan.parallel_safe = lefttree->parallel_safe;
+}
+
+/*
+ * bitmap_subplan_mark_shared
+ * Set isshared flag in bitmap subplan so that it will be created in
+ * shared memory.
+ */
+static void
+bitmap_subplan_mark_shared(Plan *plan)
+{
+ if (IsA(plan, BitmapAnd))
+ bitmap_subplan_mark_shared(linitial(((BitmapAnd *) plan)->bitmapplans));
+ else if (IsA(plan, BitmapOr))
+ {
+ ((BitmapOr *) plan)->isshared = true;
+ bitmap_subplan_mark_shared(linitial(((BitmapOr *) plan)->bitmapplans));
+ }
+ else if (IsA(plan, BitmapIndexScan))
+ ((BitmapIndexScan *) plan)->isshared = true;
+ else
+ elog(ERROR, "unrecognized node type: %d", nodeTag(plan));
+}
+
+/*****************************************************************************
+ *
+ * PLAN NODE BUILDING ROUTINES
+ *
+ * In general, these functions are not passed the original Path and therefore
+ * leave it to the caller to fill in the cost/width fields from the Path,
+ * typically by calling copy_generic_path_info(). This convention is
+ * somewhat historical, but it does support a few places above where we build
+ * a plan node without having an exactly corresponding Path node. Under no
+ * circumstances should one of these functions do its own cost calculations,
+ * as that would be redundant with calculations done while building Paths.
+ *
+ *****************************************************************************/
+
+static SeqScan *
+make_seqscan(List *qptlist,
+ List *qpqual,
+ Index scanrelid)
+{
+ SeqScan *node = makeNode(SeqScan);
+ Plan *plan = &node->scan.plan;
+
+ plan->targetlist = qptlist;
+ plan->qual = qpqual;
+ plan->lefttree = NULL;
+ plan->righttree = NULL;
+ node->scan.scanrelid = scanrelid;
+
+ return node;
+}
+
+static SampleScan *
+make_samplescan(List *qptlist,
+ List *qpqual,
+ Index scanrelid,
+ TableSampleClause *tsc)
+{
+ SampleScan *node = makeNode(SampleScan);
+ Plan *plan = &node->scan.plan;
+
+ plan->targetlist = qptlist;
+ plan->qual = qpqual;
+ plan->lefttree = NULL;
+ plan->righttree = NULL;
+ node->scan.scanrelid = scanrelid;
+ node->tablesample = tsc;
+
+ return node;
+}
+
+static IndexScan *
+make_indexscan(List *qptlist,
+ List *qpqual,
+ Index scanrelid,
+ Oid indexid,
+ List *indexqual,
+ List *indexqualorig,
+ List *indexorderby,
+ List *indexorderbyorig,
+ List *indexorderbyops,
+ ScanDirection indexscandir)
+{
+ IndexScan *node = makeNode(IndexScan);
+ Plan *plan = &node->scan.plan;
+
+ plan->targetlist = qptlist;
+ plan->qual = qpqual;
+ plan->lefttree = NULL;
+ plan->righttree = NULL;
+ node->scan.scanrelid = scanrelid;
+ node->indexid = indexid;
+ node->indexqual = indexqual;
+ node->indexqualorig = indexqualorig;
+ node->indexorderby = indexorderby;
+ node->indexorderbyorig = indexorderbyorig;
+ node->indexorderbyops = indexorderbyops;
+ node->indexorderdir = indexscandir;
+
+ return node;
+}
+
+static IndexOnlyScan *
+make_indexonlyscan(List *qptlist,
+ List *qpqual,
+ Index scanrelid,
+ Oid indexid,
+ List *indexqual,
+ List *recheckqual,
+ List *indexorderby,
+ List *indextlist,
+ ScanDirection indexscandir)
+{
+ IndexOnlyScan *node = makeNode(IndexOnlyScan);
+ Plan *plan = &node->scan.plan;
+
+ plan->targetlist = qptlist;
+ plan->qual = qpqual;
+ plan->lefttree = NULL;
+ plan->righttree = NULL;
+ node->scan.scanrelid = scanrelid;
+ node->indexid = indexid;
+ node->indexqual = indexqual;
+ node->recheckqual = recheckqual;
+ node->indexorderby = indexorderby;
+ node->indextlist = indextlist;
+ node->indexorderdir = indexscandir;
+
+ return node;
+}
+
+static BitmapIndexScan *
+make_bitmap_indexscan(Index scanrelid,
+ Oid indexid,
+ List *indexqual,
+ List *indexqualorig)
+{
+ BitmapIndexScan *node = makeNode(BitmapIndexScan);
+ Plan *plan = &node->scan.plan;
+
+ plan->targetlist = NIL; /* not used */
+ plan->qual = NIL; /* not used */
+ plan->lefttree = NULL;
+ plan->righttree = NULL;
+ node->scan.scanrelid = scanrelid;
+ node->indexid = indexid;
+ node->indexqual = indexqual;
+ node->indexqualorig = indexqualorig;
+
+ return node;
+}
+
+static BitmapHeapScan *
+make_bitmap_heapscan(List *qptlist,
+ List *qpqual,
+ Plan *lefttree,
+ List *bitmapqualorig,
+ Index scanrelid)
+{
+ BitmapHeapScan *node = makeNode(BitmapHeapScan);
+ Plan *plan = &node->scan.plan;
+
+ plan->targetlist = qptlist;
+ plan->qual = qpqual;
+ plan->lefttree = lefttree;
+ plan->righttree = NULL;
+ node->scan.scanrelid = scanrelid;
+ node->bitmapqualorig = bitmapqualorig;
+
+ return node;
+}
+
+static TidScan *
+make_tidscan(List *qptlist,
+ List *qpqual,
+ Index scanrelid,
+ List *tidquals)
+{
+ TidScan *node = makeNode(TidScan);
+ Plan *plan = &node->scan.plan;
+
+ plan->targetlist = qptlist;
+ plan->qual = qpqual;
+ plan->lefttree = NULL;
+ plan->righttree = NULL;
+ node->scan.scanrelid = scanrelid;
+ node->tidquals = tidquals;
+
+ return node;
+}
+
+static TidRangeScan *
+make_tidrangescan(List *qptlist,
+ List *qpqual,
+ Index scanrelid,
+ List *tidrangequals)
+{
+ TidRangeScan *node = makeNode(TidRangeScan);
+ Plan *plan = &node->scan.plan;
+
+ plan->targetlist = qptlist;
+ plan->qual = qpqual;
+ plan->lefttree = NULL;
+ plan->righttree = NULL;
+ node->scan.scanrelid = scanrelid;
+ node->tidrangequals = tidrangequals;
+
+ return node;
+}
+
+static SubqueryScan *
+make_subqueryscan(List *qptlist,
+ List *qpqual,
+ Index scanrelid,
+ Plan *subplan)
+{
+ SubqueryScan *node = makeNode(SubqueryScan);
+ Plan *plan = &node->scan.plan;
+
+ plan->targetlist = qptlist;
+ plan->qual = qpqual;
+ plan->lefttree = NULL;
+ plan->righttree = NULL;
+ node->scan.scanrelid = scanrelid;
+ node->subplan = subplan;
+ node->scanstatus = SUBQUERY_SCAN_UNKNOWN;
+
+ return node;
+}
+
+static FunctionScan *
+make_functionscan(List *qptlist,
+ List *qpqual,
+ Index scanrelid,
+ List *functions,
+ bool funcordinality)
+{
+ FunctionScan *node = makeNode(FunctionScan);
+ Plan *plan = &node->scan.plan;
+
+ plan->targetlist = qptlist;
+ plan->qual = qpqual;
+ plan->lefttree = NULL;
+ plan->righttree = NULL;
+ node->scan.scanrelid = scanrelid;
+ node->functions = functions;
+ node->funcordinality = funcordinality;
+
+ return node;
+}
+
+static TableFuncScan *
+make_tablefuncscan(List *qptlist,
+ List *qpqual,
+ Index scanrelid,
+ TableFunc *tablefunc)
+{
+ TableFuncScan *node = makeNode(TableFuncScan);
+ Plan *plan = &node->scan.plan;
+
+ plan->targetlist = qptlist;
+ plan->qual = qpqual;
+ plan->lefttree = NULL;
+ plan->righttree = NULL;
+ node->scan.scanrelid = scanrelid;
+ node->tablefunc = tablefunc;
+
+ return node;
+}
+
+static ValuesScan *
+make_valuesscan(List *qptlist,
+ List *qpqual,
+ Index scanrelid,
+ List *values_lists)
+{
+ ValuesScan *node = makeNode(ValuesScan);
+ Plan *plan = &node->scan.plan;
+
+ plan->targetlist = qptlist;
+ plan->qual = qpqual;
+ plan->lefttree = NULL;
+ plan->righttree = NULL;
+ node->scan.scanrelid = scanrelid;
+ node->values_lists = values_lists;
+
+ return node;
+}
+
+static CteScan *
+make_ctescan(List *qptlist,
+ List *qpqual,
+ Index scanrelid,
+ int ctePlanId,
+ int cteParam)
+{
+ CteScan *node = makeNode(CteScan);
+ Plan *plan = &node->scan.plan;
+
+ plan->targetlist = qptlist;
+ plan->qual = qpqual;
+ plan->lefttree = NULL;
+ plan->righttree = NULL;
+ node->scan.scanrelid = scanrelid;
+ node->ctePlanId = ctePlanId;
+ node->cteParam = cteParam;
+
+ return node;
+}
+
+static NamedTuplestoreScan *
+make_namedtuplestorescan(List *qptlist,
+ List *qpqual,
+ Index scanrelid,
+ char *enrname)
+{
+ NamedTuplestoreScan *node = makeNode(NamedTuplestoreScan);
+ Plan *plan = &node->scan.plan;
+
+ /* cost should be inserted by caller */
+ plan->targetlist = qptlist;
+ plan->qual = qpqual;
+ plan->lefttree = NULL;
+ plan->righttree = NULL;
+ node->scan.scanrelid = scanrelid;
+ node->enrname = enrname;
+
+ return node;
+}
+
+static WorkTableScan *
+make_worktablescan(List *qptlist,
+ List *qpqual,
+ Index scanrelid,
+ int wtParam)
+{
+ WorkTableScan *node = makeNode(WorkTableScan);
+ Plan *plan = &node->scan.plan;
+
+ plan->targetlist = qptlist;
+ plan->qual = qpqual;
+ plan->lefttree = NULL;
+ plan->righttree = NULL;
+ node->scan.scanrelid = scanrelid;
+ node->wtParam = wtParam;
+
+ return node;
+}
+
+ForeignScan *
+make_foreignscan(List *qptlist,
+ List *qpqual,
+ Index scanrelid,
+ List *fdw_exprs,
+ List *fdw_private,
+ List *fdw_scan_tlist,
+ List *fdw_recheck_quals,
+ Plan *outer_plan)
+{
+ ForeignScan *node = makeNode(ForeignScan);
+ Plan *plan = &node->scan.plan;
+
+ /* cost will be filled in by create_foreignscan_plan */
+ plan->targetlist = qptlist;
+ plan->qual = qpqual;
+ plan->lefttree = outer_plan;
+ plan->righttree = NULL;
+ node->scan.scanrelid = scanrelid;
+
+ /* these may be overridden by the FDW's PlanDirectModify callback. */
+ node->operation = CMD_SELECT;
+ node->resultRelation = 0;
+
+ /* fs_server will be filled in by create_foreignscan_plan */
+ node->fs_server = InvalidOid;
+ node->fdw_exprs = fdw_exprs;
+ node->fdw_private = fdw_private;
+ node->fdw_scan_tlist = fdw_scan_tlist;
+ node->fdw_recheck_quals = fdw_recheck_quals;
+ /* fs_relids will be filled in by create_foreignscan_plan */
+ node->fs_relids = NULL;
+ /* fsSystemCol will be filled in by create_foreignscan_plan */
+ node->fsSystemCol = false;
+
+ return node;
+}
+
+static RecursiveUnion *
+make_recursive_union(List *tlist,
+ Plan *lefttree,
+ Plan *righttree,
+ int wtParam,
+ List *distinctList,
+ long numGroups)
+{
+ RecursiveUnion *node = makeNode(RecursiveUnion);
+ Plan *plan = &node->plan;
+ int numCols = list_length(distinctList);
+
+ plan->targetlist = tlist;
+ plan->qual = NIL;
+ plan->lefttree = lefttree;
+ plan->righttree = righttree;
+ node->wtParam = wtParam;
+
+ /*
+ * convert SortGroupClause list into arrays of attr indexes and equality
+ * operators, as wanted by executor
+ */
+ node->numCols = numCols;
+ if (numCols > 0)
+ {
+ int keyno = 0;
+ AttrNumber *dupColIdx;
+ Oid *dupOperators;
+ Oid *dupCollations;
+ ListCell *slitem;
+
+ dupColIdx = (AttrNumber *) palloc(sizeof(AttrNumber) * numCols);
+ dupOperators = (Oid *) palloc(sizeof(Oid) * numCols);
+ dupCollations = (Oid *) palloc(sizeof(Oid) * numCols);
+
+ foreach(slitem, distinctList)
+ {
+ SortGroupClause *sortcl = (SortGroupClause *) lfirst(slitem);
+ TargetEntry *tle = get_sortgroupclause_tle(sortcl,
+ plan->targetlist);
+
+ dupColIdx[keyno] = tle->resno;
+ dupOperators[keyno] = sortcl->eqop;
+ dupCollations[keyno] = exprCollation((Node *) tle->expr);
+ Assert(OidIsValid(dupOperators[keyno]));
+ keyno++;
+ }
+ node->dupColIdx = dupColIdx;
+ node->dupOperators = dupOperators;
+ node->dupCollations = dupCollations;
+ }
+ node->numGroups = numGroups;
+
+ return node;
+}
+
+static BitmapAnd *
+make_bitmap_and(List *bitmapplans)
+{
+ BitmapAnd *node = makeNode(BitmapAnd);
+ Plan *plan = &node->plan;
+
+ plan->targetlist = NIL;
+ plan->qual = NIL;
+ plan->lefttree = NULL;
+ plan->righttree = NULL;
+ node->bitmapplans = bitmapplans;
+
+ return node;
+}
+
+static BitmapOr *
+make_bitmap_or(List *bitmapplans)
+{
+ BitmapOr *node = makeNode(BitmapOr);
+ Plan *plan = &node->plan;
+
+ plan->targetlist = NIL;
+ plan->qual = NIL;
+ plan->lefttree = NULL;
+ plan->righttree = NULL;
+ node->bitmapplans = bitmapplans;
+
+ return node;
+}
+
+static NestLoop *
+make_nestloop(List *tlist,
+ List *joinclauses,
+ List *otherclauses,
+ List *nestParams,
+ Plan *lefttree,
+ Plan *righttree,
+ JoinType jointype,
+ bool inner_unique)
+{
+ NestLoop *node = makeNode(NestLoop);
+ Plan *plan = &node->join.plan;
+
+ plan->targetlist = tlist;
+ plan->qual = otherclauses;
+ plan->lefttree = lefttree;
+ plan->righttree = righttree;
+ node->join.jointype = jointype;
+ node->join.inner_unique = inner_unique;
+ node->join.joinqual = joinclauses;
+ node->nestParams = nestParams;
+
+ return node;
+}
+
+static HashJoin *
+make_hashjoin(List *tlist,
+ List *joinclauses,
+ List *otherclauses,
+ List *hashclauses,
+ List *hashoperators,
+ List *hashcollations,
+ List *hashkeys,
+ Plan *lefttree,
+ Plan *righttree,
+ JoinType jointype,
+ bool inner_unique)
+{
+ HashJoin *node = makeNode(HashJoin);
+ Plan *plan = &node->join.plan;
+
+ plan->targetlist = tlist;
+ plan->qual = otherclauses;
+ plan->lefttree = lefttree;
+ plan->righttree = righttree;
+ node->hashclauses = hashclauses;
+ node->hashoperators = hashoperators;
+ node->hashcollations = hashcollations;
+ node->hashkeys = hashkeys;
+ node->join.jointype = jointype;
+ node->join.inner_unique = inner_unique;
+ node->join.joinqual = joinclauses;
+
+ return node;
+}
+
+static Hash *
+make_hash(Plan *lefttree,
+ List *hashkeys,
+ Oid skewTable,
+ AttrNumber skewColumn,
+ bool skewInherit)
+{
+ Hash *node = makeNode(Hash);
+ Plan *plan = &node->plan;
+
+ plan->targetlist = lefttree->targetlist;
+ plan->qual = NIL;
+ plan->lefttree = lefttree;
+ plan->righttree = NULL;
+
+ node->hashkeys = hashkeys;
+ node->skewTable = skewTable;
+ node->skewColumn = skewColumn;
+ node->skewInherit = skewInherit;
+
+ return node;
+}
+
+static MergeJoin *
+make_mergejoin(List *tlist,
+ List *joinclauses,
+ List *otherclauses,
+ List *mergeclauses,
+ Oid *mergefamilies,
+ Oid *mergecollations,
+ int *mergestrategies,
+ bool *mergenullsfirst,
+ Plan *lefttree,
+ Plan *righttree,
+ JoinType jointype,
+ bool inner_unique,
+ bool skip_mark_restore)
+{
+ MergeJoin *node = makeNode(MergeJoin);
+ Plan *plan = &node->join.plan;
+
+ plan->targetlist = tlist;
+ plan->qual = otherclauses;
+ plan->lefttree = lefttree;
+ plan->righttree = righttree;
+ node->skip_mark_restore = skip_mark_restore;
+ node->mergeclauses = mergeclauses;
+ node->mergeFamilies = mergefamilies;
+ node->mergeCollations = mergecollations;
+ node->mergeStrategies = mergestrategies;
+ node->mergeNullsFirst = mergenullsfirst;
+ node->join.jointype = jointype;
+ node->join.inner_unique = inner_unique;
+ node->join.joinqual = joinclauses;
+
+ return node;
+}
+
+/*
+ * make_sort --- basic routine to build a Sort plan node
+ *
+ * Caller must have built the sortColIdx, sortOperators, collations, and
+ * nullsFirst arrays already.
+ */
+static Sort *
+make_sort(Plan *lefttree, int numCols,
+ AttrNumber *sortColIdx, Oid *sortOperators,
+ Oid *collations, bool *nullsFirst)
+{
+ Sort *node;
+ Plan *plan;
+
+ node = makeNode(Sort);
+
+ plan = &node->plan;
+ plan->targetlist = lefttree->targetlist;
+ plan->qual = NIL;
+ plan->lefttree = lefttree;
+ plan->righttree = NULL;
+ node->numCols = numCols;
+ node->sortColIdx = sortColIdx;
+ node->sortOperators = sortOperators;
+ node->collations = collations;
+ node->nullsFirst = nullsFirst;
+
+ return node;
+}
+
+/*
+ * make_incrementalsort --- basic routine to build an IncrementalSort plan node
+ *
+ * Caller must have built the sortColIdx, sortOperators, collations, and
+ * nullsFirst arrays already.
+ */
+static IncrementalSort *
+make_incrementalsort(Plan *lefttree, int numCols, int nPresortedCols,
+ AttrNumber *sortColIdx, Oid *sortOperators,
+ Oid *collations, bool *nullsFirst)
+{
+ IncrementalSort *node;
+ Plan *plan;
+
+ node = makeNode(IncrementalSort);
+
+ plan = &node->sort.plan;
+ plan->targetlist = lefttree->targetlist;
+ plan->qual = NIL;
+ plan->lefttree = lefttree;
+ plan->righttree = NULL;
+ node->nPresortedCols = nPresortedCols;
+ node->sort.numCols = numCols;
+ node->sort.sortColIdx = sortColIdx;
+ node->sort.sortOperators = sortOperators;
+ node->sort.collations = collations;
+ node->sort.nullsFirst = nullsFirst;
+
+ return node;
+}
+
+/*
+ * prepare_sort_from_pathkeys
+ * Prepare to sort according to given pathkeys
+ *
+ * This is used to set up for Sort, MergeAppend, and Gather Merge nodes. It
+ * calculates the executor's representation of the sort key information, and
+ * adjusts the plan targetlist if needed to add resjunk sort columns.
+ *
+ * Input parameters:
+ * 'lefttree' is the plan node which yields input tuples
+ * 'pathkeys' is the list of pathkeys by which the result is to be sorted
+ * 'relids' identifies the child relation being sorted, if any
+ * 'reqColIdx' is NULL or an array of required sort key column numbers
+ * 'adjust_tlist_in_place' is true if lefttree must be modified in-place
+ *
+ * We must convert the pathkey information into arrays of sort key column
+ * numbers, sort operator OIDs, collation OIDs, and nulls-first flags,
+ * which is the representation the executor wants. These are returned into
+ * the output parameters *p_numsortkeys etc.
+ *
+ * When looking for matches to an EquivalenceClass's members, we will only
+ * consider child EC members if they belong to the given 'relids'. This
+ * protects against possible incorrect matches to child expressions that
+ * contain no Vars.
+ *
+ * If reqColIdx isn't NULL then it contains sort key column numbers that
+ * we should match. This is used when making child plans for a MergeAppend;
+ * it's an error if we can't match the columns.
+ *
+ * If the pathkeys include expressions that aren't simple Vars, we will
+ * usually need to add resjunk items to the input plan's targetlist to
+ * compute these expressions, since a Sort or MergeAppend node itself won't
+ * do any such calculations. If the input plan type isn't one that can do
+ * projections, this means adding a Result node just to do the projection.
+ * However, the caller can pass adjust_tlist_in_place = true to force the
+ * lefttree tlist to be modified in-place regardless of whether the node type
+ * can project --- we use this for fixing the tlist of MergeAppend itself.
+ *
+ * Returns the node which is to be the input to the Sort (either lefttree,
+ * or a Result stacked atop lefttree).
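+ *
+ * For example, sorting by the expression "x + y" when the input tlist has
+ * only x and y results in a resjunk entry computing x + y being appended to
+ * the tlist (beneath an injected Result node, if the input plan type cannot
+ * project and adjust_tlist_in_place is false).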
+ */
+static Plan *
+prepare_sort_from_pathkeys(Plan *lefttree, List *pathkeys,
+ Relids relids,
+ const AttrNumber *reqColIdx,
+ bool adjust_tlist_in_place,
+ int *p_numsortkeys,
+ AttrNumber **p_sortColIdx,
+ Oid **p_sortOperators,
+ Oid **p_collations,
+ bool **p_nullsFirst)
+{
+ List *tlist = lefttree->targetlist;
+ ListCell *i;
+ int numsortkeys;
+ AttrNumber *sortColIdx;
+ Oid *sortOperators;
+ Oid *collations;
+ bool *nullsFirst;
+
+ /*
+ * We will need at most list_length(pathkeys) sort columns; possibly less
+ */
+ numsortkeys = list_length(pathkeys);
+ sortColIdx = (AttrNumber *) palloc(numsortkeys * sizeof(AttrNumber));
+ sortOperators = (Oid *) palloc(numsortkeys * sizeof(Oid));
+ collations = (Oid *) palloc(numsortkeys * sizeof(Oid));
+ nullsFirst = (bool *) palloc(numsortkeys * sizeof(bool));
+
+ numsortkeys = 0;
+
+ foreach(i, pathkeys)
+ {
+ PathKey *pathkey = (PathKey *) lfirst(i);
+ EquivalenceClass *ec = pathkey->pk_eclass;
+ EquivalenceMember *em;
+ TargetEntry *tle = NULL;
+ Oid pk_datatype = InvalidOid;
+ Oid sortop;
+ ListCell *j;
+
+ if (ec->ec_has_volatile)
+ {
+ /*
+ * If the pathkey's EquivalenceClass is volatile, then it must
+ * have come from an ORDER BY clause, and we have to match it to
+ * that same targetlist entry.
+ */
+ if (ec->ec_sortref == 0) /* can't happen */
+ elog(ERROR, "volatile EquivalenceClass has no sortref");
+ tle = get_sortgroupref_tle(ec->ec_sortref, tlist);
+ Assert(tle);
+ Assert(list_length(ec->ec_members) == 1);
+ pk_datatype = ((EquivalenceMember *) linitial(ec->ec_members))->em_datatype;
+ }
+ else if (reqColIdx != NULL)
+ {
+ /*
+ * If we are given a sort column number to match, only consider
+ * the single TLE at that position. It's possible that there is
+ * no such TLE, in which case fall through and generate a resjunk
+ * targetentry (we assume this must have happened in the parent
+ * plan as well). If there is a TLE but it doesn't match the
+ * pathkey's EC, we do the same, which is probably the wrong thing
+ * but we'll leave it to caller to complain about the mismatch.
+ */
+ tle = get_tle_by_resno(tlist, reqColIdx[numsortkeys]);
+ if (tle)
+ {
+ em = find_ec_member_matching_expr(ec, tle->expr, relids);
+ if (em)
+ {
+ /* found expr at right place in tlist */
+ pk_datatype = em->em_datatype;
+ }
+ else
+ tle = NULL;
+ }
+ }
+ else
+ {
+ /*
+ * Otherwise, we can sort by any non-constant expression listed in
+ * the pathkey's EquivalenceClass. For now, we take the first
+ * tlist item found in the EC. If there's no match, we'll generate
+ * a resjunk entry using the first EC member that is an expression
+ * in the input's vars. (The non-const restriction only matters
+ * if the EC is below_outer_join; but if it isn't, it won't
+ * contain consts anyway, else we'd have discarded the pathkey as
+ * redundant.)
+ *
+ * XXX if we have a choice, is there any way of figuring out which
+ * might be cheapest to execute? (For example, int4lt is likely
+ * much cheaper to execute than numericlt, but both might appear
+ * in the same equivalence class...) Not clear that we ever will
+ * have an interesting choice in practice, so it may not matter.
+ */
+ foreach(j, tlist)
+ {
+ tle = (TargetEntry *) lfirst(j);
+ em = find_ec_member_matching_expr(ec, tle->expr, relids);
+ if (em)
+ {
+ /* found expr already in tlist */
+ pk_datatype = em->em_datatype;
+ break;
+ }
+ tle = NULL;
+ }
+ }
+
+ if (!tle)
+ {
+ /*
+ * No matching tlist item; look for a computable expression.
+ */
+ em = find_computable_ec_member(NULL, ec, tlist, relids, false);
+ if (!em)
+ elog(ERROR, "could not find pathkey item to sort");
+ pk_datatype = em->em_datatype;
+
+ /*
+ * Do we need to insert a Result node?
+ */
+ if (!adjust_tlist_in_place &&
+ !is_projection_capable_plan(lefttree))
+ {
+ /* copy needed so we don't modify input's tlist below */
+ tlist = copyObject(tlist);
+ lefttree = inject_projection_plan(lefttree, tlist,
+ lefttree->parallel_safe);
+ }
+
+ /* Don't bother testing is_projection_capable_plan again */
+ adjust_tlist_in_place = true;
+
+ /*
+ * Add resjunk entry to input's tlist
+ */
+ tle = makeTargetEntry(copyObject(em->em_expr),
+ list_length(tlist) + 1,
+ NULL,
+ true);
+ tlist = lappend(tlist, tle);
+ lefttree->targetlist = tlist; /* just in case NIL before */
+ }
+
+ /*
+ * Look up the correct sort operator from the PathKey's slightly
+ * abstracted representation.
+ */
+ sortop = get_opfamily_member(pathkey->pk_opfamily,
+ pk_datatype,
+ pk_datatype,
+ pathkey->pk_strategy);
+ if (!OidIsValid(sortop)) /* should not happen */
+ elog(ERROR, "missing operator %d(%u,%u) in opfamily %u",
+ pathkey->pk_strategy, pk_datatype, pk_datatype,
+ pathkey->pk_opfamily);
+
+ /* Add the column to the sort arrays */
+ sortColIdx[numsortkeys] = tle->resno;
+ sortOperators[numsortkeys] = sortop;
+ collations[numsortkeys] = ec->ec_collation;
+ nullsFirst[numsortkeys] = pathkey->pk_nulls_first;
+ numsortkeys++;
+ }
+
+ /* Return results */
+ *p_numsortkeys = numsortkeys;
+ *p_sortColIdx = sortColIdx;
+ *p_sortOperators = sortOperators;
+ *p_collations = collations;
+ *p_nullsFirst = nullsFirst;
+
+ return lefttree;
+}
+
+/*
+ * make_sort_from_pathkeys
+ * Create sort plan to sort according to given pathkeys
+ *
+ * 'lefttree' is the node which yields input tuples
+ * 'pathkeys' is the list of pathkeys by which the result is to be sorted
+ * 'relids' is the set of relations required by prepare_sort_from_pathkeys()
+ */
+static Sort *
+make_sort_from_pathkeys(Plan *lefttree, List *pathkeys, Relids relids)
+{
+ int numsortkeys;
+ AttrNumber *sortColIdx;
+ Oid *sortOperators;
+ Oid *collations;
+ bool *nullsFirst;
+
+ /* Compute sort column info, and adjust lefttree as needed */
+ lefttree = prepare_sort_from_pathkeys(lefttree, pathkeys,
+ relids,
+ NULL,
+ false,
+ &numsortkeys,
+ &sortColIdx,
+ &sortOperators,
+ &collations,
+ &nullsFirst);
+
+ /* Now build the Sort node */
+ return make_sort(lefttree, numsortkeys,
+ sortColIdx, sortOperators,
+ collations, nullsFirst);
+}
+
+/*
+ * make_incrementalsort_from_pathkeys
+ * Create sort plan to sort according to given pathkeys
+ *
+ * 'lefttree' is the node which yields input tuples
+ * 'pathkeys' is the list of pathkeys by which the result is to be sorted
+ * 'relids' is the set of relations required by prepare_sort_from_pathkeys()
+ * 'nPresortedCols' is the number of presorted columns in input tuples
+ */
+static IncrementalSort *
+make_incrementalsort_from_pathkeys(Plan *lefttree, List *pathkeys,
+ Relids relids, int nPresortedCols)
+{
+ int numsortkeys;
+ AttrNumber *sortColIdx;
+ Oid *sortOperators;
+ Oid *collations;
+ bool *nullsFirst;
+
+ /* Compute sort column info, and adjust lefttree as needed */
+ lefttree = prepare_sort_from_pathkeys(lefttree, pathkeys,
+ relids,
+ NULL,
+ false,
+ &numsortkeys,
+ &sortColIdx,
+ &sortOperators,
+ &collations,
+ &nullsFirst);
+
+ /* Now build the Sort node */
+ return make_incrementalsort(lefttree, numsortkeys, nPresortedCols,
+ sortColIdx, sortOperators,
+ collations, nullsFirst);
+}
+
+/*
+ * make_sort_from_sortclauses
+ * Create sort plan to sort according to given sortclauses
+ *
+ * 'sortcls' is a list of SortGroupClauses
+ * 'lefttree' is the node which yields input tuples
+ */
+Sort *
+make_sort_from_sortclauses(List *sortcls, Plan *lefttree)
+{
+ List *sub_tlist = lefttree->targetlist;
+ ListCell *l;
+ int numsortkeys;
+ AttrNumber *sortColIdx;
+ Oid *sortOperators;
+ Oid *collations;
+ bool *nullsFirst;
+
+ /* Convert list-ish representation to arrays wanted by executor */
+ numsortkeys = list_length(sortcls);
+ sortColIdx = (AttrNumber *) palloc(numsortkeys * sizeof(AttrNumber));
+ sortOperators = (Oid *) palloc(numsortkeys * sizeof(Oid));
+ collations = (Oid *) palloc(numsortkeys * sizeof(Oid));
+ nullsFirst = (bool *) palloc(numsortkeys * sizeof(bool));
+
+ numsortkeys = 0;
+ foreach(l, sortcls)
+ {
+ SortGroupClause *sortcl = (SortGroupClause *) lfirst(l);
+ TargetEntry *tle = get_sortgroupclause_tle(sortcl, sub_tlist);
+
+ sortColIdx[numsortkeys] = tle->resno;
+ sortOperators[numsortkeys] = sortcl->sortop;
+ collations[numsortkeys] = exprCollation((Node *) tle->expr);
+ nullsFirst[numsortkeys] = sortcl->nulls_first;
+ numsortkeys++;
+ }
+
+ return make_sort(lefttree, numsortkeys,
+ sortColIdx, sortOperators,
+ collations, nullsFirst);
+}
+
+/*
+ * make_sort_from_groupcols
+ * Create sort plan to sort based on grouping columns
+ *
+ * 'groupcls' is the list of SortGroupClauses
+ * 'grpColIdx' gives the column numbers to use
+ *
+ * This might look like it could be merged with make_sort_from_sortclauses,
+ * but presently we *must* use the grpColIdx[] array to locate sort columns,
+ * because the child plan's tlist is not marked with ressortgroupref info
+ * appropriate to the grouping node. So, only the sort ordering info
+ * is used from the SortGroupClause entries.
+ */
+static Sort *
+make_sort_from_groupcols(List *groupcls,
+ AttrNumber *grpColIdx,
+ Plan *lefttree)
+{
+ List *sub_tlist = lefttree->targetlist;
+ ListCell *l;
+ int numsortkeys;
+ AttrNumber *sortColIdx;
+ Oid *sortOperators;
+ Oid *collations;
+ bool *nullsFirst;
+
+ /* Convert list-ish representation to arrays wanted by executor */
+ numsortkeys = list_length(groupcls);
+ sortColIdx = (AttrNumber *) palloc(numsortkeys * sizeof(AttrNumber));
+ sortOperators = (Oid *) palloc(numsortkeys * sizeof(Oid));
+ collations = (Oid *) palloc(numsortkeys * sizeof(Oid));
+ nullsFirst = (bool *) palloc(numsortkeys * sizeof(bool));
+
+ numsortkeys = 0;
+ foreach(l, groupcls)
+ {
+ SortGroupClause *grpcl = (SortGroupClause *) lfirst(l);
+ TargetEntry *tle = get_tle_by_resno(sub_tlist, grpColIdx[numsortkeys]);
+
+ if (!tle)
+ elog(ERROR, "could not retrieve tle for sort-from-groupcols");
+
+ sortColIdx[numsortkeys] = tle->resno;
+ sortOperators[numsortkeys] = grpcl->sortop;
+ collations[numsortkeys] = exprCollation((Node *) tle->expr);
+ nullsFirst[numsortkeys] = grpcl->nulls_first;
+ numsortkeys++;
+ }
+
+ return make_sort(lefttree, numsortkeys,
+ sortColIdx, sortOperators,
+ collations, nullsFirst);
+}
+
+static Material *
+make_material(Plan *lefttree)
+{
+ Material *node = makeNode(Material);
+ Plan *plan = &node->plan;
+
+ plan->targetlist = lefttree->targetlist;
+ plan->qual = NIL;
+ plan->lefttree = lefttree;
+ plan->righttree = NULL;
+
+ return node;
+}
+
+/*
+ * materialize_finished_plan: stick a Material node atop a completed plan
+ *
+ * There are a couple of places where we want to attach a Material node
+ * after completion of create_plan(), without any MaterialPath path.
+ * Those places should probably be refactored someday to do this on the
+ * Path representation, but it's not worth the trouble yet.
+ */
+Plan *
+materialize_finished_plan(Plan *subplan)
+{
+ Plan *matplan;
+ Path matpath; /* dummy for result of cost_material */
+
+ matplan = (Plan *) make_material(subplan);
+
+ /*
+ * XXX horrid kluge: if there are any initPlans attached to the subplan,
+ * move them up to the Material node, which is now effectively the top
+ * plan node in its query level. This prevents failure in
+ * SS_finalize_plan(), which see for comments. We don't bother adjusting
+ * the subplan's cost estimate for this.
+ */
+ matplan->initPlan = subplan->initPlan;
+ subplan->initPlan = NIL;
+
+ /* Set cost data */
+ cost_material(&matpath,
+ subplan->startup_cost,
+ subplan->total_cost,
+ subplan->plan_rows,
+ subplan->plan_width);
+ matplan->startup_cost = matpath.startup_cost;
+ matplan->total_cost = matpath.total_cost;
+ matplan->plan_rows = subplan->plan_rows;
+ matplan->plan_width = subplan->plan_width;
+ matplan->parallel_aware = false;
+ matplan->parallel_safe = subplan->parallel_safe;
+
+ return matplan;
+}
+
+static Memoize *
+make_memoize(Plan *lefttree, Oid *hashoperators, Oid *collations,
+ List *param_exprs, bool singlerow, bool binary_mode,
+ uint32 est_entries, Bitmapset *keyparamids)
+{
+ Memoize *node = makeNode(Memoize);
+ Plan *plan = &node->plan;
+
+ plan->targetlist = lefttree->targetlist;
+ plan->qual = NIL;
+ plan->lefttree = lefttree;
+ plan->righttree = NULL;
+
+ node->numKeys = list_length(param_exprs);
+ node->hashOperators = hashoperators;
+ node->collations = collations;
+ node->param_exprs = param_exprs;
+ node->singlerow = singlerow;
+ node->binary_mode = binary_mode;
+ node->est_entries = est_entries;
+ node->keyparamids = keyparamids;
+
+ return node;
+}
+
+Agg *
+make_agg(List *tlist, List *qual,
+ AggStrategy aggstrategy, AggSplit aggsplit,
+ int numGroupCols, AttrNumber *grpColIdx, Oid *grpOperators, Oid *grpCollations,
+ List *groupingSets, List *chain, double dNumGroups,
+ Size transitionSpace, Plan *lefttree)
+{
+ Agg *node = makeNode(Agg);
+ Plan *plan = &node->plan;
+ long numGroups;
+
+ /* Reduce to long, but 'ware overflow! */
+ numGroups = clamp_cardinality_to_long(dNumGroups);
+
+ node->aggstrategy = aggstrategy;
+ node->aggsplit = aggsplit;
+ node->numCols = numGroupCols;
+ node->grpColIdx = grpColIdx;
+ node->grpOperators = grpOperators;
+ node->grpCollations = grpCollations;
+ node->numGroups = numGroups;
+ node->transitionSpace = transitionSpace;
+ node->aggParams = NULL; /* SS_finalize_plan() will fill this */
+ node->groupingSets = groupingSets;
+ node->chain = chain;
+
+ plan->qual = qual;
+ plan->targetlist = tlist;
+ plan->lefttree = lefttree;
+ plan->righttree = NULL;
+
+ return node;
+}
+
+static WindowAgg *
+make_windowagg(List *tlist, Index winref,
+ int partNumCols, AttrNumber *partColIdx, Oid *partOperators, Oid *partCollations,
+ int ordNumCols, AttrNumber *ordColIdx, Oid *ordOperators, Oid *ordCollations,
+ int frameOptions, Node *startOffset, Node *endOffset,
+ Oid startInRangeFunc, Oid endInRangeFunc,
+ Oid inRangeColl, bool inRangeAsc, bool inRangeNullsFirst,
+ List *runCondition, List *qual, bool topWindow, Plan *lefttree)
+{
+ WindowAgg *node = makeNode(WindowAgg);
+ Plan *plan = &node->plan;
+
+ node->winref = winref;
+ node->partNumCols = partNumCols;
+ node->partColIdx = partColIdx;
+ node->partOperators = partOperators;
+ node->partCollations = partCollations;
+ node->ordNumCols = ordNumCols;
+ node->ordColIdx = ordColIdx;
+ node->ordOperators = ordOperators;
+ node->ordCollations = ordCollations;
+ node->frameOptions = frameOptions;
+ node->startOffset = startOffset;
+ node->endOffset = endOffset;
+ node->runCondition = runCondition;
+ /* a duplicate of the above for EXPLAIN */
+ node->runConditionOrig = runCondition;
+ node->startInRangeFunc = startInRangeFunc;
+ node->endInRangeFunc = endInRangeFunc;
+ node->inRangeColl = inRangeColl;
+ node->inRangeAsc = inRangeAsc;
+ node->inRangeNullsFirst = inRangeNullsFirst;
+ node->topWindow = topWindow;
+
+ plan->targetlist = tlist;
+ plan->lefttree = lefttree;
+ plan->righttree = NULL;
+ plan->qual = qual;
+
+ return node;
+}
+
+static Group *
+make_group(List *tlist,
+ List *qual,
+ int numGroupCols,
+ AttrNumber *grpColIdx,
+ Oid *grpOperators,
+ Oid *grpCollations,
+ Plan *lefttree)
+{
+ Group *node = makeNode(Group);
+ Plan *plan = &node->plan;
+
+ node->numCols = numGroupCols;
+ node->grpColIdx = grpColIdx;
+ node->grpOperators = grpOperators;
+ node->grpCollations = grpCollations;
+
+ plan->qual = qual;
+ plan->targetlist = tlist;
+ plan->lefttree = lefttree;
+ plan->righttree = NULL;
+
+ return node;
+}
+
+/*
+ * distinctList is a list of SortGroupClauses, identifying the targetlist items
+ * that should be considered by the Unique filter. The input path must
+ * already be sorted accordingly.
+ */
+static Unique *
+make_unique_from_sortclauses(Plan *lefttree, List *distinctList)
+{
+ Unique *node = makeNode(Unique);
+ Plan *plan = &node->plan;
+ int numCols = list_length(distinctList);
+ int keyno = 0;
+ AttrNumber *uniqColIdx;
+ Oid *uniqOperators;
+ Oid *uniqCollations;
+ ListCell *slitem;
+
+ plan->targetlist = lefttree->targetlist;
+ plan->qual = NIL;
+ plan->lefttree = lefttree;
+ plan->righttree = NULL;
+
+ /*
+ * convert SortGroupClause list into arrays of attr indexes and equality
+ * operators, as wanted by executor
+ */
+ Assert(numCols > 0);
+ uniqColIdx = (AttrNumber *) palloc(sizeof(AttrNumber) * numCols);
+ uniqOperators = (Oid *) palloc(sizeof(Oid) * numCols);
+ uniqCollations = (Oid *) palloc(sizeof(Oid) * numCols);
+
+ foreach(slitem, distinctList)
+ {
+ SortGroupClause *sortcl = (SortGroupClause *) lfirst(slitem);
+ TargetEntry *tle = get_sortgroupclause_tle(sortcl, plan->targetlist);
+
+ uniqColIdx[keyno] = tle->resno;
+ uniqOperators[keyno] = sortcl->eqop;
+ uniqCollations[keyno] = exprCollation((Node *) tle->expr);
+ Assert(OidIsValid(uniqOperators[keyno]));
+ keyno++;
+ }
+
+ node->numCols = numCols;
+ node->uniqColIdx = uniqColIdx;
+ node->uniqOperators = uniqOperators;
+ node->uniqCollations = uniqCollations;
+
+ return node;
+}
+
+/*
+ * as above, but use pathkeys to identify the sort columns and semantics
+ */
+static Unique *
+make_unique_from_pathkeys(Plan *lefttree, List *pathkeys, int numCols)
+{
+ Unique *node = makeNode(Unique);
+ Plan *plan = &node->plan;
+ int keyno = 0;
+ AttrNumber *uniqColIdx;
+ Oid *uniqOperators;
+ Oid *uniqCollations;
+ ListCell *lc;
+
+ plan->targetlist = lefttree->targetlist;
+ plan->qual = NIL;
+ plan->lefttree = lefttree;
+ plan->righttree = NULL;
+
+ /*
+ * Convert pathkeys list into arrays of attr indexes and equality
+ * operators, as wanted by executor. This has a lot in common with
+ * prepare_sort_from_pathkeys ... maybe unify sometime?
+ */
+ Assert(numCols >= 0 && numCols <= list_length(pathkeys));
+ uniqColIdx = (AttrNumber *) palloc(sizeof(AttrNumber) * numCols);
+ uniqOperators = (Oid *) palloc(sizeof(Oid) * numCols);
+ uniqCollations = (Oid *) palloc(sizeof(Oid) * numCols);
+
+ foreach(lc, pathkeys)
+ {
+ PathKey *pathkey = (PathKey *) lfirst(lc);
+ EquivalenceClass *ec = pathkey->pk_eclass;
+ EquivalenceMember *em;
+ TargetEntry *tle = NULL;
+ Oid pk_datatype = InvalidOid;
+ Oid eqop;
+ ListCell *j;
+
+ /* Ignore pathkeys beyond the specified number of columns */
+ if (keyno >= numCols)
+ break;
+
+ if (ec->ec_has_volatile)
+ {
+ /*
+ * If the pathkey's EquivalenceClass is volatile, then it must
+ * have come from an ORDER BY clause, and we have to match it to
+ * that same targetlist entry.
+ */
+ if (ec->ec_sortref == 0) /* can't happen */
+ elog(ERROR, "volatile EquivalenceClass has no sortref");
+ tle = get_sortgroupref_tle(ec->ec_sortref, plan->targetlist);
+ Assert(tle);
+ Assert(list_length(ec->ec_members) == 1);
+ pk_datatype = ((EquivalenceMember *) linitial(ec->ec_members))->em_datatype;
+ }
+ else
+ {
+ /*
+ * Otherwise, we can use any non-constant expression listed in the
+ * pathkey's EquivalenceClass. For now, we take the first tlist
+ * item found in the EC.
+ */
+ foreach(j, plan->targetlist)
+ {
+ tle = (TargetEntry *) lfirst(j);
+ em = find_ec_member_matching_expr(ec, tle->expr, NULL);
+ if (em)
+ {
+ /* found expr already in tlist */
+ pk_datatype = em->em_datatype;
+ break;
+ }
+ tle = NULL;
+ }
+ }
+
+ if (!tle)
+ elog(ERROR, "could not find pathkey item to sort");
+
+ /*
+ * Look up the correct equality operator from the PathKey's slightly
+ * abstracted representation.
+ */
+ eqop = get_opfamily_member(pathkey->pk_opfamily,
+ pk_datatype,
+ pk_datatype,
+ BTEqualStrategyNumber);
+ if (!OidIsValid(eqop)) /* should not happen */
+ elog(ERROR, "missing operator %d(%u,%u) in opfamily %u",
+ BTEqualStrategyNumber, pk_datatype, pk_datatype,
+ pathkey->pk_opfamily);
+
+ uniqColIdx[keyno] = tle->resno;
+ uniqOperators[keyno] = eqop;
+ uniqCollations[keyno] = ec->ec_collation;
+
+ keyno++;
+ }
+
+ node->numCols = numCols;
+ node->uniqColIdx = uniqColIdx;
+ node->uniqOperators = uniqOperators;
+ node->uniqCollations = uniqCollations;
+
+ return node;
+}
+
+static Gather *
+make_gather(List *qptlist,
+ List *qpqual,
+ int nworkers,
+ int rescan_param,
+ bool single_copy,
+ Plan *subplan)
+{
+ Gather *node = makeNode(Gather);
+ Plan *plan = &node->plan;
+
+ plan->targetlist = qptlist;
+ plan->qual = qpqual;
+ plan->lefttree = subplan;
+ plan->righttree = NULL;
+ node->num_workers = nworkers;
+ node->rescan_param = rescan_param;
+ node->single_copy = single_copy;
+ node->invisible = false;
+ node->initParam = NULL;
+
+ return node;
+}
+
+/*
+ * distinctList is a list of SortGroupClauses, identifying the targetlist
+ * items that should be considered by the SetOp filter. The input path must
+ * already be sorted accordingly.
+ */
+static SetOp *
+make_setop(SetOpCmd cmd, SetOpStrategy strategy, Plan *lefttree,
+ List *distinctList, AttrNumber flagColIdx, int firstFlag,
+ long numGroups)
+{
+ SetOp *node = makeNode(SetOp);
+ Plan *plan = &node->plan;
+ int numCols = list_length(distinctList);
+ int keyno = 0;
+ AttrNumber *dupColIdx;
+ Oid *dupOperators;
+ Oid *dupCollations;
+ ListCell *slitem;
+
+ plan->targetlist = lefttree->targetlist;
+ plan->qual = NIL;
+ plan->lefttree = lefttree;
+ plan->righttree = NULL;
+
+ /*
+ * convert SortGroupClause list into arrays of attr indexes and equality
+ * operators, as wanted by executor
+ */
+ dupColIdx = (AttrNumber *) palloc(sizeof(AttrNumber) * numCols);
+ dupOperators = (Oid *) palloc(sizeof(Oid) * numCols);
+ dupCollations = (Oid *) palloc(sizeof(Oid) * numCols);
+
+ foreach(slitem, distinctList)
+ {
+ SortGroupClause *sortcl = (SortGroupClause *) lfirst(slitem);
+ TargetEntry *tle = get_sortgroupclause_tle(sortcl, plan->targetlist);
+
+ dupColIdx[keyno] = tle->resno;
+ dupOperators[keyno] = sortcl->eqop;
+ dupCollations[keyno] = exprCollation((Node *) tle->expr);
+ Assert(OidIsValid(dupOperators[keyno]));
+ keyno++;
+ }
+
+ node->cmd = cmd;
+ node->strategy = strategy;
+ node->numCols = numCols;
+ node->dupColIdx = dupColIdx;
+ node->dupOperators = dupOperators;
+ node->dupCollations = dupCollations;
+ node->flagColIdx = flagColIdx;
+ node->firstFlag = firstFlag;
+ node->numGroups = numGroups;
+
+ return node;
+}
+
+/*
+ * make_lockrows
+ * Build a LockRows plan node
+ */
+static LockRows *
+make_lockrows(Plan *lefttree, List *rowMarks, int epqParam)
+{
+ LockRows *node = makeNode(LockRows);
+ Plan *plan = &node->plan;
+
+ plan->targetlist = lefttree->targetlist;
+ plan->qual = NIL;
+ plan->lefttree = lefttree;
+ plan->righttree = NULL;
+
+ node->rowMarks = rowMarks;
+ node->epqParam = epqParam;
+
+ return node;
+}
+
+/*
+ * make_limit
+ * Build a Limit plan node
+ */
+Limit *
+make_limit(Plan *lefttree, Node *limitOffset, Node *limitCount,
+ LimitOption limitOption, int uniqNumCols, AttrNumber *uniqColIdx,
+ Oid *uniqOperators, Oid *uniqCollations)
+{
+ Limit *node = makeNode(Limit);
+ Plan *plan = &node->plan;
+
+ plan->targetlist = lefttree->targetlist;
+ plan->qual = NIL;
+ plan->lefttree = lefttree;
+ plan->righttree = NULL;
+
+ node->limitOffset = limitOffset;
+ node->limitCount = limitCount;
+ node->limitOption = limitOption;
+ node->uniqNumCols = uniqNumCols;
+ node->uniqColIdx = uniqColIdx;
+ node->uniqOperators = uniqOperators;
+ node->uniqCollations = uniqCollations;
+
+ return node;
+}
+
+/*
+ * make_result
+ * Build a Result plan node
+ */
+static Result *
+make_result(List *tlist,
+ Node *resconstantqual,
+ Plan *subplan)
+{
+ Result *node = makeNode(Result);
+ Plan *plan = &node->plan;
+
+ plan->targetlist = tlist;
+ plan->qual = NIL;
+ plan->lefttree = subplan;
+ plan->righttree = NULL;
+ node->resconstantqual = resconstantqual;
+
+ return node;
+}
+
+/*
+ * make_project_set
+ * Build a ProjectSet plan node
+ */
+static ProjectSet *
+make_project_set(List *tlist,
+ Plan *subplan)
+{
+ ProjectSet *node = makeNode(ProjectSet);
+ Plan *plan = &node->plan;
+
+ plan->targetlist = tlist;
+ plan->qual = NIL;
+ plan->lefttree = subplan;
+ plan->righttree = NULL;
+
+ return node;
+}
+
+/*
+ * make_modifytable
+ * Build a ModifyTable plan node
+ */
+static ModifyTable *
+make_modifytable(PlannerInfo *root, Plan *subplan,
+ CmdType operation, bool canSetTag,
+ Index nominalRelation, Index rootRelation,
+ bool partColsUpdated,
+ List *resultRelations,
+ List *updateColnosLists,
+ List *withCheckOptionLists, List *returningLists,
+ List *rowMarks, OnConflictExpr *onconflict,
+ List *mergeActionLists, int epqParam)
+{
+ ModifyTable *node = makeNode(ModifyTable);
+ List *fdw_private_list;
+ Bitmapset *direct_modify_plans;
+ ListCell *lc;
+ int i;
+
+ Assert(operation == CMD_MERGE ||
+ (operation == CMD_UPDATE ?
+ list_length(resultRelations) == list_length(updateColnosLists) :
+ updateColnosLists == NIL));
+ Assert(withCheckOptionLists == NIL ||
+ list_length(resultRelations) == list_length(withCheckOptionLists));
+ Assert(returningLists == NIL ||
+ list_length(resultRelations) == list_length(returningLists));
+
+ node->plan.lefttree = subplan;
+ node->plan.righttree = NULL;
+ node->plan.qual = NIL;
+ /* setrefs.c will fill in the targetlist, if needed */
+ node->plan.targetlist = NIL;
+
+ node->operation = operation;
+ node->canSetTag = canSetTag;
+ node->nominalRelation = nominalRelation;
+ node->rootRelation = rootRelation;
+ node->partColsUpdated = partColsUpdated;
+ node->resultRelations = resultRelations;
+ if (!onconflict)
+ {
+ node->onConflictAction = ONCONFLICT_NONE;
+ node->onConflictSet = NIL;
+ node->onConflictCols = NIL;
+ node->onConflictWhere = NULL;
+ node->arbiterIndexes = NIL;
+ node->exclRelRTI = 0;
+ node->exclRelTlist = NIL;
+ }
+ else
+ {
+ node->onConflictAction = onconflict->action;
+
+ /*
+ * Here we convert the ON CONFLICT UPDATE tlist, if any, to the
+ * executor's convention of having consecutive resno's. The actual
+ * target column numbers are saved in node->onConflictCols. (This
+ * could be done earlier, but there seems no need to.)
+ */
+ node->onConflictSet = onconflict->onConflictSet;
+ node->onConflictCols =
+ extract_update_targetlist_colnos(node->onConflictSet);
+ node->onConflictWhere = onconflict->onConflictWhere;
+
+ /*
+ * If a set of unique index inference elements was provided (an
+ * INSERT...ON CONFLICT "inference specification"), then infer
+ * appropriate unique indexes (or throw an error if none are
+ * available).
+ */
+ node->arbiterIndexes = infer_arbiter_indexes(root);
+
+ node->exclRelRTI = onconflict->exclRelIndex;
+ node->exclRelTlist = onconflict->exclRelTlist;
+ }
+ node->updateColnosLists = updateColnosLists;
+ node->withCheckOptionLists = withCheckOptionLists;
+ node->returningLists = returningLists;
+ node->rowMarks = rowMarks;
+ node->mergeActionLists = mergeActionLists;
+ node->epqParam = epqParam;
+
+ /*
+ * For each result relation that is a foreign table, allow the FDW to
+ * construct private plan data, and accumulate it all into a list.
+ */
+ fdw_private_list = NIL;
+ direct_modify_plans = NULL;
+ i = 0;
+ foreach(lc, resultRelations)
+ {
+ Index rti = lfirst_int(lc);
+ FdwRoutine *fdwroutine;
+ List *fdw_private;
+ bool direct_modify;
+
+ /*
+ * If possible, we want to get the FdwRoutine from our RelOptInfo for
+ * the table. But sometimes we don't have a RelOptInfo and must get
+ * it the hard way. (In INSERT, the target relation is not scanned,
+ * so it's not a baserel; and there are also corner cases for
+ * updatable views where the target rel isn't a baserel.)
+ */
+ if (rti < root->simple_rel_array_size &&
+ root->simple_rel_array[rti] != NULL)
+ {
+ RelOptInfo *resultRel = root->simple_rel_array[rti];
+
+ fdwroutine = resultRel->fdwroutine;
+
+ /*
+ * MERGE is not currently supported for foreign tables, and we have
+ * already rejected the case where the table mentioned in the query is
+ * itself a foreign table; but we can still get here if a partitioned
+ * table has a foreign table as a partition. Disallow that now, to
+ * avoid an uglier error message later.
+ */
+ if (operation == CMD_MERGE && fdwroutine != NULL)
+ {
+ RangeTblEntry *rte = root->simple_rte_array[rti];
+
+ ereport(ERROR,
+ errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("cannot execute MERGE on relation \"%s\"",
+ get_rel_name(rte->relid)),
+ errdetail_relkind_not_supported(rte->relkind));
+ }
+
+ }
+ else
+ {
+ RangeTblEntry *rte = planner_rt_fetch(rti, root);
+
+ Assert(rte->rtekind == RTE_RELATION);
+ Assert(operation != CMD_MERGE);
+ if (rte->relkind == RELKIND_FOREIGN_TABLE)
+ fdwroutine = GetFdwRoutineByRelId(rte->relid);
+ else
+ fdwroutine = NULL;
+ }
+
+ /*
+ * Try to modify the foreign table directly if (1) the FDW provides
+ * callback functions needed for that and (2) there are no local
+ * structures that need to be run for each modified row: row-level
+ * triggers on the foreign table, stored generated columns, WITH CHECK
+ * OPTIONs from parent views.
+ */
+ direct_modify = false;
+ if (fdwroutine != NULL &&
+ fdwroutine->PlanDirectModify != NULL &&
+ fdwroutine->BeginDirectModify != NULL &&
+ fdwroutine->IterateDirectModify != NULL &&
+ fdwroutine->EndDirectModify != NULL &&
+ withCheckOptionLists == NIL &&
+ !has_row_triggers(root, rti, operation) &&
+ !has_stored_generated_columns(root, rti))
+ direct_modify = fdwroutine->PlanDirectModify(root, node, rti, i);
+ if (direct_modify)
+ direct_modify_plans = bms_add_member(direct_modify_plans, i);
+
+ if (!direct_modify &&
+ fdwroutine != NULL &&
+ fdwroutine->PlanForeignModify != NULL)
+ fdw_private = fdwroutine->PlanForeignModify(root, node, rti, i);
+ else
+ fdw_private = NIL;
+ fdw_private_list = lappend(fdw_private_list, fdw_private);
+ i++;
+ }
+ node->fdwPrivLists = fdw_private_list;
+ node->fdwDirectModifyPlans = direct_modify_plans;
+
+ return node;
+}
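+
+/*
+ * Illustrative example (editorial addition, not part of the upstream file):
+ * suppose resultRelations is (3, 5) and only RTE 5 is a foreign table. If
+ * its FDW can handle the statement via direct modification, the loop above
+ * sets fdwDirectModifyPlans to {1} (the zero-based position of RTE 5 in
+ * resultRelations) and fdwPrivLists to (NIL, NIL); otherwise
+ * PlanForeignModify is consulted and its result becomes the second element
+ * of fdwPrivLists, with fdwDirectModifyPlans left empty.
+ */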
+
+/*
+ * is_projection_capable_path
+ * Check whether a given Path node is able to do projection.
+ */
+bool
+is_projection_capable_path(Path *path)
+{
+ /* Most plan types can project, so just list the ones that can't */
+ switch (path->pathtype)
+ {
+ case T_Hash:
+ case T_Material:
+ case T_Memoize:
+ case T_Sort:
+ case T_IncrementalSort:
+ case T_Unique:
+ case T_SetOp:
+ case T_LockRows:
+ case T_Limit:
+ case T_ModifyTable:
+ case T_MergeAppend:
+ case T_RecursiveUnion:
+ return false;
+ case T_CustomScan:
+ if (castNode(CustomPath, path)->flags & CUSTOMPATH_SUPPORT_PROJECTION)
+ return true;
+ return false;
+ case T_Append:
+
+ /*
+ * Append can't project, but if an AppendPath is being used to
+ * represent a dummy path, what will actually be generated is a
+ * Result which can project.
+ */
+ return IS_DUMMY_APPEND(path);
+ case T_ProjectSet:
+
+ /*
+ * Although ProjectSet certainly projects, say "no" because we
+ * don't want the planner to randomly replace its tlist with
+ * something else; the SRFs have to stay at top level. This might
+ * get relaxed later.
+ */
+ return false;
+ default:
+ break;
+ }
+ return true;
+}
+
+/*
+ * is_projection_capable_plan
+ * Check whether a given Plan node is able to do projection.
+ */
+bool
+is_projection_capable_plan(Plan *plan)
+{
+ /* Most plan types can project, so just list the ones that can't */
+ switch (nodeTag(plan))
+ {
+ case T_Hash:
+ case T_Material:
+ case T_Memoize:
+ case T_Sort:
+ case T_Unique:
+ case T_SetOp:
+ case T_LockRows:
+ case T_Limit:
+ case T_ModifyTable:
+ case T_Append:
+ case T_MergeAppend:
+ case T_RecursiveUnion:
+ return false;
+ case T_CustomScan:
+ if (((CustomScan *) plan)->flags & CUSTOMPATH_SUPPORT_PROJECTION)
+ return true;
+ return false;
+ case T_ProjectSet:
+
+ /*
+ * Although ProjectSet certainly projects, say "no" because we
+ * don't want the planner to randomly replace its tlist with
+ * something else; the SRFs have to stay at top level. This might
+ * get relaxed later.
+ */
+ return false;
+ default:
+ break;
+ }
+ return true;
+}
diff --git a/src/backend/optimizer/plan/initsplan.c b/src/backend/optimizer/plan/initsplan.c
new file mode 100644
index 0000000..023efba
--- /dev/null
+++ b/src/backend/optimizer/plan/initsplan.c
@@ -0,0 +1,2752 @@
+/*-------------------------------------------------------------------------
+ *
+ * initsplan.c
+ * Target list, qualification, joininfo initialization routines
+ *
+ * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ * src/backend/optimizer/plan/initsplan.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "catalog/pg_class.h"
+#include "catalog/pg_type.h"
+#include "nodes/makefuncs.h"
+#include "nodes/nodeFuncs.h"
+#include "optimizer/clauses.h"
+#include "optimizer/cost.h"
+#include "optimizer/inherit.h"
+#include "optimizer/joininfo.h"
+#include "optimizer/optimizer.h"
+#include "optimizer/pathnode.h"
+#include "optimizer/paths.h"
+#include "optimizer/placeholder.h"
+#include "optimizer/planmain.h"
+#include "optimizer/planner.h"
+#include "optimizer/prep.h"
+#include "optimizer/restrictinfo.h"
+#include "parser/analyze.h"
+#include "rewrite/rewriteManip.h"
+#include "utils/lsyscache.h"
+#include "utils/typcache.h"
+
+/* These parameters are set by GUC */
+int from_collapse_limit;
+int join_collapse_limit;
+
+
+/* Elements of the postponed_qual_list used during deconstruct_recurse */
+typedef struct PostponedQual
+{
+ Node *qual; /* a qual clause waiting to be processed */
+ Relids relids; /* the set of baserels it references */
+} PostponedQual;
+
+
+static void extract_lateral_references(PlannerInfo *root, RelOptInfo *brel,
+ Index rtindex);
+static List *deconstruct_recurse(PlannerInfo *root, Node *jtnode,
+ bool below_outer_join,
+ Relids *qualscope, Relids *inner_join_rels,
+ List **postponed_qual_list);
+static void process_security_barrier_quals(PlannerInfo *root,
+ int rti, Relids qualscope,
+ bool below_outer_join);
+static SpecialJoinInfo *make_outerjoininfo(PlannerInfo *root,
+ Relids left_rels, Relids right_rels,
+ Relids inner_join_rels,
+ JoinType jointype, List *clause);
+static void compute_semijoin_info(PlannerInfo *root, SpecialJoinInfo *sjinfo,
+ List *clause);
+static void distribute_qual_to_rels(PlannerInfo *root, Node *clause,
+ bool below_outer_join,
+ JoinType jointype,
+ Index security_level,
+ Relids qualscope,
+ Relids ojscope,
+ Relids outerjoin_nonnullable,
+ List **postponed_qual_list);
+static bool check_outerjoin_delay(PlannerInfo *root, Relids *relids_p,
+ Relids *nullable_relids_p, bool is_pushed_down);
+static bool check_equivalence_delay(PlannerInfo *root,
+ RestrictInfo *restrictinfo);
+static bool check_redundant_nullability_qual(PlannerInfo *root, Node *clause);
+static void check_mergejoinable(RestrictInfo *restrictinfo);
+static void check_hashjoinable(RestrictInfo *restrictinfo);
+static void check_memoizable(RestrictInfo *restrictinfo);
+
+
+/*****************************************************************************
+ *
+ * JOIN TREES
+ *
+ *****************************************************************************/
+
+/*
+ * add_base_rels_to_query
+ *
+ * Scan the query's jointree and create baserel RelOptInfos for all
+ * the base relations (e.g., table, subquery, and function RTEs)
+ * appearing in the jointree.
+ *
+ * The initial invocation must pass root->parse->jointree as the value of
+ * jtnode. Internally, the function recurses through the jointree.
+ *
+ * At the end of this process, there should be one baserel RelOptInfo for
+ * every non-join RTE that is used in the query. Some of the baserels
+ * may be appendrel parents, which will require additional "otherrel"
+ * RelOptInfos for their member rels, but those are added later.
+ */
+void
+add_base_rels_to_query(PlannerInfo *root, Node *jtnode)
+{
+ if (jtnode == NULL)
+ return;
+ if (IsA(jtnode, RangeTblRef))
+ {
+ int varno = ((RangeTblRef *) jtnode)->rtindex;
+
+ (void) build_simple_rel(root, varno, NULL);
+ }
+ else if (IsA(jtnode, FromExpr))
+ {
+ FromExpr *f = (FromExpr *) jtnode;
+ ListCell *l;
+
+ foreach(l, f->fromlist)
+ add_base_rels_to_query(root, lfirst(l));
+ }
+ else if (IsA(jtnode, JoinExpr))
+ {
+ JoinExpr *j = (JoinExpr *) jtnode;
+
+ add_base_rels_to_query(root, j->larg);
+ add_base_rels_to_query(root, j->rarg);
+ }
+ else
+ elog(ERROR, "unrecognized node type: %d",
+ (int) nodeTag(jtnode));
+}
+
+/*
+ * add_other_rels_to_query
+ * create "otherrel" RelOptInfos for the children of appendrel baserels
+ *
+ * At the end of this process, there should be RelOptInfos for all relations
+ * that will be scanned by the query.
+ */
+void
+add_other_rels_to_query(PlannerInfo *root)
+{
+ int rti;
+
+ for (rti = 1; rti < root->simple_rel_array_size; rti++)
+ {
+ RelOptInfo *rel = root->simple_rel_array[rti];
+ RangeTblEntry *rte = root->simple_rte_array[rti];
+
+ /* there may be empty slots corresponding to non-baserel RTEs */
+ if (rel == NULL)
+ continue;
+
+ /* Ignore any "otherrels" that were already added. */
+ if (rel->reloptkind != RELOPT_BASEREL)
+ continue;
+
+ /* If it's marked as inheritable, look for children. */
+ if (rte->inh)
+ expand_inherited_rtentry(root, rel, rte, rti);
+ }
+}
+
+
+/*****************************************************************************
+ *
+ * TARGET LISTS
+ *
+ *****************************************************************************/
+
+/*
+ * build_base_rel_tlists
+ * Add targetlist entries for each var needed in the query's final tlist
+ * (and HAVING clause, if any) to the appropriate base relations.
+ *
+ * We mark such vars as needed by "relation 0" to ensure that they will
+ * propagate up through all join plan steps.
+ */
+void
+build_base_rel_tlists(PlannerInfo *root, List *final_tlist)
+{
+ List *tlist_vars = pull_var_clause((Node *) final_tlist,
+ PVC_RECURSE_AGGREGATES |
+ PVC_RECURSE_WINDOWFUNCS |
+ PVC_INCLUDE_PLACEHOLDERS);
+
+ if (tlist_vars != NIL)
+ {
+ add_vars_to_targetlist(root, tlist_vars, bms_make_singleton(0), true);
+ list_free(tlist_vars);
+ }
+
+ /*
+ * If there's a HAVING clause, we'll need the Vars it uses, too. Note
+ * that HAVING can contain Aggrefs but not WindowFuncs.
+ */
+ if (root->parse->havingQual)
+ {
+ List *having_vars = pull_var_clause(root->parse->havingQual,
+ PVC_RECURSE_AGGREGATES |
+ PVC_INCLUDE_PLACEHOLDERS);
+
+ if (having_vars != NIL)
+ {
+ add_vars_to_targetlist(root, having_vars,
+ bms_make_singleton(0), true);
+ list_free(having_vars);
+ }
+ }
+}
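+
+/*
+ * Illustrative note (editorial addition, not part of the upstream file):
+ * "relation 0" is not a real RTE index, so a where_needed set of {0} can
+ * never be covered by any joinrel's relids. That is what guarantees that
+ * Vars used in the final targetlist (or HAVING) keep being propagated up
+ * through every join level instead of being dropped once some intermediate
+ * join no longer needs them for its own quals.
+ */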
+
+/*
+ * add_vars_to_targetlist
+ * For each variable appearing in the list, add it to the owning
+ * relation's targetlist if not already present, and mark the variable
+ * as being needed for the indicated join (or for final output if
+ * where_needed includes "relation 0").
+ *
+ * The list may also contain PlaceHolderVars. These don't necessarily
+ * have a single owning relation; we keep their attr_needed info in
+ * root->placeholder_list instead. If create_new_ph is true, it's OK
+ * to create new PlaceHolderInfos; otherwise, the PlaceHolderInfos must
+ * already exist, and we should only update their ph_needed. (This should
+ * be true before deconstruct_jointree begins, and false after that.)
+ */
+void
+add_vars_to_targetlist(PlannerInfo *root, List *vars,
+ Relids where_needed, bool create_new_ph)
+{
+ ListCell *temp;
+
+ Assert(!bms_is_empty(where_needed));
+
+ foreach(temp, vars)
+ {
+ Node *node = (Node *) lfirst(temp);
+
+ if (IsA(node, Var))
+ {
+ Var *var = (Var *) node;
+ RelOptInfo *rel = find_base_rel(root, var->varno);
+ int attno = var->varattno;
+
+ if (bms_is_subset(where_needed, rel->relids))
+ continue;
+ Assert(attno >= rel->min_attr && attno <= rel->max_attr);
+ attno -= rel->min_attr;
+ if (rel->attr_needed[attno] == NULL)
+ {
+ /* Variable not yet requested, so add to rel's targetlist */
+ /* XXX is copyObject necessary here? */
+ rel->reltarget->exprs = lappend(rel->reltarget->exprs,
+ copyObject(var));
+ /* reltarget cost and width will be computed later */
+ }
+ rel->attr_needed[attno] = bms_add_members(rel->attr_needed[attno],
+ where_needed);
+ }
+ else if (IsA(node, PlaceHolderVar))
+ {
+ PlaceHolderVar *phv = (PlaceHolderVar *) node;
+ PlaceHolderInfo *phinfo = find_placeholder_info(root, phv,
+ create_new_ph);
+
+ phinfo->ph_needed = bms_add_members(phinfo->ph_needed,
+ where_needed);
+ }
+ else
+ elog(ERROR, "unrecognized node type: %d", (int) nodeTag(node));
+ }
+}
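+
+/*
+ * Illustrative note (editorial addition, not part of the upstream file):
+ * attr_needed is indexed with min_attr as its zero point. For example, with
+ * min_attr = -6 (typical for a plain heap relation, whose lowest system
+ * column is tableoid at attno -6), a Var with varattno = 2 is recorded at
+ * attr_needed[2 - (-6)] = attr_needed[8]. Vars whose where_needed is
+ * already a subset of the rel's own relids are skipped entirely, since they
+ * never have to propagate above the rel.
+ */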
+
+
+/*****************************************************************************
+ *
+ * LATERAL REFERENCES
+ *
+ *****************************************************************************/
+
+/*
+ * find_lateral_references
+ * For each LATERAL subquery, extract all its references to Vars and
+ * PlaceHolderVars of the current query level, and make sure those values
+ * will be available for evaluation of the subquery.
+ *
+ * While later planning steps ensure that the Var/PHV source rels are on the
+ * outside of nestloops relative to the LATERAL subquery, we also need to
+ * ensure that the Vars/PHVs propagate up to the nestloop join level; this
+ * means setting suitable where_needed values for them.
+ *
+ * Note that this only deals with lateral references in unflattened LATERAL
+ * subqueries. When we flatten a LATERAL subquery, its lateral references
+ * become plain Vars in the parent query, but they may have to be wrapped in
+ * PlaceHolderVars if they need to be forced NULL by outer joins that don't
+ * also null the LATERAL subquery. That's all handled elsewhere.
+ *
+ * This has to run before deconstruct_jointree, since it might result in
+ * creation of PlaceHolderInfos.
+ */
+void
+find_lateral_references(PlannerInfo *root)
+{
+ Index rti;
+
+ /* We need do nothing if the query contains no LATERAL RTEs */
+ if (!root->hasLateralRTEs)
+ return;
+
+ /*
+ * Examine all baserels (the rel array has been set up by now).
+ */
+ for (rti = 1; rti < root->simple_rel_array_size; rti++)
+ {
+ RelOptInfo *brel = root->simple_rel_array[rti];
+
+ /* there may be empty slots corresponding to non-baserel RTEs */
+ if (brel == NULL)
+ continue;
+
+ Assert(brel->relid == rti); /* sanity check on array */
+
+ /*
+ * This bit is less obvious than it might look. We ignore appendrel
+ * otherrels and consider only their parent baserels. In a case where
+ * a LATERAL-containing UNION ALL subquery was pulled up, it is the
+ * otherrel that is actually going to be in the plan. However, we
+ * want to mark all its lateral references as needed by the parent,
+ * because it is the parent's relid that will be used for join
+ * planning purposes. And the parent's RTE will contain all the
+ * lateral references we need to know, since the pulled-up member is
+ * nothing but a copy of parts of the original RTE's subquery. We
+ * could visit the parent's children instead and transform their
+ * references back to the parent's relid, but it would be much more
+ * complicated for no real gain. (Important here is that the child
+ * members have not yet received any processing beyond being pulled
+ * up.) Similarly, in appendrels created by inheritance expansion,
+ * it's sufficient to look at the parent relation.
+ */
+
+ /* ignore RTEs that are "other rels" */
+ if (brel->reloptkind != RELOPT_BASEREL)
+ continue;
+
+ extract_lateral_references(root, brel, rti);
+ }
+}
+
+static void
+extract_lateral_references(PlannerInfo *root, RelOptInfo *brel, Index rtindex)
+{
+ RangeTblEntry *rte = root->simple_rte_array[rtindex];
+ List *vars;
+ List *newvars;
+ Relids where_needed;
+ ListCell *lc;
+
+ /* No cross-references are possible if it's not LATERAL */
+ if (!rte->lateral)
+ return;
+
+ /* Fetch the appropriate variables */
+ if (rte->rtekind == RTE_RELATION)
+ vars = pull_vars_of_level((Node *) rte->tablesample, 0);
+ else if (rte->rtekind == RTE_SUBQUERY)
+ vars = pull_vars_of_level((Node *) rte->subquery, 1);
+ else if (rte->rtekind == RTE_FUNCTION)
+ vars = pull_vars_of_level((Node *) rte->functions, 0);
+ else if (rte->rtekind == RTE_TABLEFUNC)
+ vars = pull_vars_of_level((Node *) rte->tablefunc, 0);
+ else if (rte->rtekind == RTE_VALUES)
+ vars = pull_vars_of_level((Node *) rte->values_lists, 0);
+ else
+ {
+ Assert(false);
+ return; /* keep compiler quiet */
+ }
+
+ if (vars == NIL)
+ return; /* nothing to do */
+
+ /* Copy each Var (or PlaceHolderVar) and adjust it to match our level */
+ newvars = NIL;
+ foreach(lc, vars)
+ {
+ Node *node = (Node *) lfirst(lc);
+
+ node = copyObject(node);
+ if (IsA(node, Var))
+ {
+ Var *var = (Var *) node;
+
+ /* Adjustment is easy since it's just one node */
+ var->varlevelsup = 0;
+ }
+ else if (IsA(node, PlaceHolderVar))
+ {
+ PlaceHolderVar *phv = (PlaceHolderVar *) node;
+ int levelsup = phv->phlevelsup;
+
+ /* Have to work harder to adjust the contained expression too */
+ if (levelsup != 0)
+ IncrementVarSublevelsUp(node, -levelsup, 0);
+
+ /*
+ * If we pulled the PHV out of a subquery RTE, its expression
+ * needs to be preprocessed. subquery_planner() already did this
+ * for level-zero PHVs in function and values RTEs, though.
+ */
+ if (levelsup > 0)
+ phv->phexpr = preprocess_phv_expression(root, phv->phexpr);
+ }
+ else
+ Assert(false);
+ newvars = lappend(newvars, node);
+ }
+
+ list_free(vars);
+
+ /*
+ * We mark the Vars as being "needed" at the LATERAL RTE. This is a bit
+ * of a cheat: a more formal approach would be to mark each one as needed
+ * at the join of the LATERAL RTE with its source RTE. But it will work,
+ * and it's much less tedious than computing a separate where_needed for
+ * each Var.
+ */
+ where_needed = bms_make_singleton(rtindex);
+
+ /*
+ * Push Vars into their source relations' targetlists, and PHVs into
+ * root->placeholder_list.
+ */
+ add_vars_to_targetlist(root, newvars, where_needed, true);
+
+ /* Remember the lateral references for create_lateral_join_info */
+ brel->lateral_vars = newvars;
+}
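+
+/*
+ * Illustrative example (editorial addition, not part of the upstream file):
+ * given
+ *
+ *     SELECT * FROM a, LATERAL (SELECT * FROM b WHERE b.y = a.x) ss;
+ *
+ * the subquery RTE for ss is marked lateral, and pull_vars_of_level() finds
+ * the level-1 reference to a.x inside it. The code above copies that Var,
+ * zeroes its varlevelsup, marks it as needed at ss's own rtindex (so the
+ * value will be available wherever ss ends up in the join tree), and
+ * records it in ss's lateral_vars for create_lateral_join_info() to use.
+ */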
+
+/*
+ * create_lateral_join_info
+ * Fill in the per-base-relation direct_lateral_relids, lateral_relids
+ * and lateral_referencers sets.
+ *
+ * This has to run after deconstruct_jointree, because we need to know the
+ * final ph_eval_at values for PlaceHolderVars.
+ */
+void
+create_lateral_join_info(PlannerInfo *root)
+{
+ bool found_laterals = false;
+ Index rti;
+ ListCell *lc;
+
+ /* We need do nothing if the query contains no LATERAL RTEs */
+ if (!root->hasLateralRTEs)
+ return;
+
+ /*
+ * Examine all baserels (the rel array has been set up by now).
+ */
+ for (rti = 1; rti < root->simple_rel_array_size; rti++)
+ {
+ RelOptInfo *brel = root->simple_rel_array[rti];
+ Relids lateral_relids;
+
+ /* there may be empty slots corresponding to non-baserel RTEs */
+ if (brel == NULL)
+ continue;
+
+ Assert(brel->relid == rti); /* sanity check on array */
+
+ /* ignore RTEs that are "other rels" */
+ if (brel->reloptkind != RELOPT_BASEREL)
+ continue;
+
+ lateral_relids = NULL;
+
+ /* consider each laterally-referenced Var or PHV */
+ foreach(lc, brel->lateral_vars)
+ {
+ Node *node = (Node *) lfirst(lc);
+
+ if (IsA(node, Var))
+ {
+ Var *var = (Var *) node;
+
+ found_laterals = true;
+ lateral_relids = bms_add_member(lateral_relids,
+ var->varno);
+ }
+ else if (IsA(node, PlaceHolderVar))
+ {
+ PlaceHolderVar *phv = (PlaceHolderVar *) node;
+ PlaceHolderInfo *phinfo = find_placeholder_info(root, phv,
+ false);
+
+ found_laterals = true;
+ lateral_relids = bms_add_members(lateral_relids,
+ phinfo->ph_eval_at);
+ }
+ else
+ Assert(false);
+ }
+
+ /* We now have all the simple lateral refs from this rel */
+ brel->direct_lateral_relids = lateral_relids;
+ brel->lateral_relids = bms_copy(lateral_relids);
+ }
+
+ /*
+ * Now check for lateral references within PlaceHolderVars, and mark their
+ * eval_at rels as having lateral references to the source rels.
+ *
+ * For a PHV that is due to be evaluated at a baserel, mark its source(s)
+ * as direct lateral dependencies of the baserel (adding onto the ones
+ * recorded above). If it's due to be evaluated at a join, mark its
+ * source(s) as indirect lateral dependencies of each baserel in the join,
+ * ie put them into lateral_relids but not direct_lateral_relids. This is
+ * appropriate because we can't put any such baserel on the outside of a
+ * join to one of the PHV's lateral dependencies, but on the other hand we
+ * also can't yet join it directly to the dependency.
+ */
+ foreach(lc, root->placeholder_list)
+ {
+ PlaceHolderInfo *phinfo = (PlaceHolderInfo *) lfirst(lc);
+ Relids eval_at = phinfo->ph_eval_at;
+ int varno;
+
+ if (phinfo->ph_lateral == NULL)
+ continue; /* PHV is uninteresting if no lateral refs */
+
+ found_laterals = true;
+
+ if (bms_get_singleton_member(eval_at, &varno))
+ {
+ /* Evaluation site is a baserel */
+ RelOptInfo *brel = find_base_rel(root, varno);
+
+ brel->direct_lateral_relids =
+ bms_add_members(brel->direct_lateral_relids,
+ phinfo->ph_lateral);
+ brel->lateral_relids =
+ bms_add_members(brel->lateral_relids,
+ phinfo->ph_lateral);
+ }
+ else
+ {
+ /* Evaluation site is a join */
+ varno = -1;
+ while ((varno = bms_next_member(eval_at, varno)) >= 0)
+ {
+ RelOptInfo *brel = find_base_rel(root, varno);
+
+ brel->lateral_relids = bms_add_members(brel->lateral_relids,
+ phinfo->ph_lateral);
+ }
+ }
+ }
+
+ /*
+ * If we found no actual lateral references, we're done; but reset the
+ * hasLateralRTEs flag to avoid useless work later.
+ */
+ if (!found_laterals)
+ {
+ root->hasLateralRTEs = false;
+ return;
+ }
+
+ /*
+ * Calculate the transitive closure of the lateral_relids sets, so that
+ * they describe both direct and indirect lateral references. If relation
+ * X references Y laterally, and Y references Z laterally, then we will
+ * have to scan X on the inside of a nestloop with Z, so for all intents
+ * and purposes X is laterally dependent on Z too.
+ *
+ * This code is essentially Warshall's algorithm for transitive closure.
+ * The outer loop considers each baserel, and propagates its lateral
+ * dependencies to those baserels that have a lateral dependency on it.
+ */
+ for (rti = 1; rti < root->simple_rel_array_size; rti++)
+ {
+ RelOptInfo *brel = root->simple_rel_array[rti];
+ Relids outer_lateral_relids;
+ Index rti2;
+
+ if (brel == NULL || brel->reloptkind != RELOPT_BASEREL)
+ continue;
+
+ /* need not consider baserel further if it has no lateral refs */
+ outer_lateral_relids = brel->lateral_relids;
+ if (outer_lateral_relids == NULL)
+ continue;
+
+ /* else scan all baserels */
+ for (rti2 = 1; rti2 < root->simple_rel_array_size; rti2++)
+ {
+ RelOptInfo *brel2 = root->simple_rel_array[rti2];
+
+ if (brel2 == NULL || brel2->reloptkind != RELOPT_BASEREL)
+ continue;
+
+ /* if brel2 has lateral ref to brel, propagate brel's refs */
+ if (bms_is_member(rti, brel2->lateral_relids))
+ brel2->lateral_relids = bms_add_members(brel2->lateral_relids,
+ outer_lateral_relids);
+ }
+ }
+
+ /*
+ * Now that we've identified all lateral references, mark each baserel
+ * with the set of relids of rels that reference it laterally (possibly
+ * indirectly) --- that is, the inverse mapping of lateral_relids.
+ */
+ for (rti = 1; rti < root->simple_rel_array_size; rti++)
+ {
+ RelOptInfo *brel = root->simple_rel_array[rti];
+ Relids lateral_relids;
+ int rti2;
+
+ if (brel == NULL || brel->reloptkind != RELOPT_BASEREL)
+ continue;
+
+ /* Nothing to do at rels with no lateral refs */
+ lateral_relids = brel->lateral_relids;
+ if (lateral_relids == NULL)
+ continue;
+
+ /*
+ * We should not have broken the invariant that lateral_relids is
+ * exactly NULL if empty.
+ */
+ Assert(!bms_is_empty(lateral_relids));
+
+ /* Also, no rel should have a lateral dependency on itself */
+ Assert(!bms_is_member(rti, lateral_relids));
+
+ /* Mark this rel's referencees */
+ rti2 = -1;
+ while ((rti2 = bms_next_member(lateral_relids, rti2)) >= 0)
+ {
+ RelOptInfo *brel2 = root->simple_rel_array[rti2];
+
+ Assert(brel2 != NULL && brel2->reloptkind == RELOPT_BASEREL);
+ brel2->lateral_referencers =
+ bms_add_member(brel2->lateral_referencers, rti);
+ }
+ }
+}
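+
+/*
+ * Illustrative example (editorial addition, not part of the upstream file):
+ * if subquery b laterally references a, and subquery c laterally references
+ * b, the closure pass above adds a to c's lateral_relids even though c
+ * never mentions a directly, and a's lateral_referencers ends up as {b, c}.
+ * The join search then knows that c must be scanned on the inside of a
+ * nestloop whose outer side supplies a.
+ */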
+
+
+/*****************************************************************************
+ *
+ * JOIN TREE PROCESSING
+ *
+ *****************************************************************************/
+
+/*
+ * deconstruct_jointree
+ * Recursively scan the query's join tree for WHERE and JOIN/ON qual
+ * clauses, and add these to the appropriate restrictinfo and joininfo
+ * lists belonging to base RelOptInfos. Also, add SpecialJoinInfo nodes
+ * to root->join_info_list for any outer joins appearing in the query tree.
+ * Return a "joinlist" data structure showing the join order decisions
+ * that need to be made by make_one_rel().
+ *
+ * The "joinlist" result is a list of items that are either RangeTblRef
+ * jointree nodes or sub-joinlists. All the items at the same level of
+ * joinlist must be joined in an order to be determined by make_one_rel()
+ * (note that legal orders may be constrained by SpecialJoinInfo nodes).
+ * A sub-joinlist represents a subproblem to be planned separately. Currently
+ * sub-joinlists arise only from FULL OUTER JOIN or when collapsing of
+ * subproblems is stopped by join_collapse_limit or from_collapse_limit.
+ *
+ * NOTE: when dealing with inner joins, it is appropriate to let a qual clause
+ * be evaluated at the lowest level where all the variables it mentions are
+ * available. However, we cannot push a qual down into the nullable side(s)
+ * of an outer join since the qual might eliminate matching rows and cause a
+ * NULL row to be incorrectly emitted by the join. Therefore, we artificially
+ * OR the minimum-relids of such an outer join into the required_relids of
+ * clauses appearing above it. This forces those clauses to be delayed until
+ * application of the outer join (or maybe even higher in the join tree).
+ */
+List *
+deconstruct_jointree(PlannerInfo *root)
+{
+ List *result;
+ Relids qualscope;
+ Relids inner_join_rels;
+ List *postponed_qual_list = NIL;
+
+ /* Start recursion at top of jointree */
+ Assert(root->parse->jointree != NULL &&
+ IsA(root->parse->jointree, FromExpr));
+
+ /* this is filled as we scan the jointree */
+ root->nullable_baserels = NULL;
+
+ result = deconstruct_recurse(root, (Node *) root->parse->jointree, false,
+ &qualscope, &inner_join_rels,
+ &postponed_qual_list);
+
+ /* Shouldn't be any leftover quals */
+ Assert(postponed_qual_list == NIL);
+
+ return result;
+}
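+
+/*
+ * Illustrative example (editorial addition, not part of the upstream file),
+ * assuming the collapse limits are not exceeded: for
+ *
+ *     SELECT ... FROM a, b FULL JOIN c ON (b.x = c.x), d
+ *
+ * the result is roughly the joinlist
+ *
+ *     (RTR(a), ((RTR(b)), (RTR(c))), RTR(d))
+ *
+ * writing RTR(x) for the RangeTblRef of x. That is, a, d, and the FULL JOIN
+ * sub-problem can be joined in any legal order chosen by make_one_rel(),
+ * but b and c are forced to be joined to each other first, because FULL
+ * JOIN always produces a sub-joinlist.
+ */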
+
+/*
+ * deconstruct_recurse
+ * One recursion level of deconstruct_jointree processing.
+ *
+ * Inputs:
+ * jtnode is the jointree node to examine
+ * below_outer_join is true if this node is within the nullable side of a
+ * higher-level outer join
+ * Outputs:
+ * *qualscope gets the set of base Relids syntactically included in this
+ * jointree node (do not modify or free this, as it may also be pointed
+ * to by RestrictInfo and SpecialJoinInfo nodes)
+ * *inner_join_rels gets the set of base Relids syntactically included in
+ * inner joins appearing at or below this jointree node (do not modify
+ * or free this, either)
+ * *postponed_qual_list is a list of PostponedQual structs, which we can
+ * add quals to if they turn out to belong to a higher join level
+ * Return value is the appropriate joinlist for this jointree node
+ *
+ * In addition, entries will be added to root->join_info_list for outer joins.
+ */
+static List *
+deconstruct_recurse(PlannerInfo *root, Node *jtnode, bool below_outer_join,
+ Relids *qualscope, Relids *inner_join_rels,
+ List **postponed_qual_list)
+{
+ List *joinlist;
+
+ if (jtnode == NULL)
+ {
+ *qualscope = NULL;
+ *inner_join_rels = NULL;
+ return NIL;
+ }
+ if (IsA(jtnode, RangeTblRef))
+ {
+ int varno = ((RangeTblRef *) jtnode)->rtindex;
+
+ /* qualscope is just the one RTE */
+ *qualscope = bms_make_singleton(varno);
+ /* Deal with any securityQuals attached to the RTE */
+ if (root->qual_security_level > 0)
+ process_security_barrier_quals(root,
+ varno,
+ *qualscope,
+ below_outer_join);
+ /* A single baserel does not create an inner join */
+ *inner_join_rels = NULL;
+ joinlist = list_make1(jtnode);
+ }
+ else if (IsA(jtnode, FromExpr))
+ {
+ FromExpr *f = (FromExpr *) jtnode;
+ List *child_postponed_quals = NIL;
+ int remaining;
+ ListCell *l;
+
+ /*
+ * First, recurse to handle child joins. We collapse subproblems into
+ * a single joinlist whenever the resulting joinlist wouldn't exceed
+ * from_collapse_limit members. Also, always collapse one-element
+ * subproblems, since that won't lengthen the joinlist anyway.
+ */
+ *qualscope = NULL;
+ *inner_join_rels = NULL;
+ joinlist = NIL;
+ remaining = list_length(f->fromlist);
+ foreach(l, f->fromlist)
+ {
+ Relids sub_qualscope;
+ List *sub_joinlist;
+ int sub_members;
+
+ sub_joinlist = deconstruct_recurse(root, lfirst(l),
+ below_outer_join,
+ &sub_qualscope,
+ inner_join_rels,
+ &child_postponed_quals);
+ *qualscope = bms_add_members(*qualscope, sub_qualscope);
+ sub_members = list_length(sub_joinlist);
+ remaining--;
+ if (sub_members <= 1 ||
+ list_length(joinlist) + sub_members + remaining <= from_collapse_limit)
+ joinlist = list_concat(joinlist, sub_joinlist);
+ else
+ joinlist = lappend(joinlist, sub_joinlist);
+ }
+
+ /*
+ * A FROM with more than one list element is an inner join subsuming
+ * all below it, so we should report inner_join_rels = qualscope. If
+ * there was exactly one element, we should (and already did) report
+ * whatever its inner_join_rels were. If there were no elements (is
+ * that still possible?) the initialization before the loop fixed it.
+ */
+ if (list_length(f->fromlist) > 1)
+ *inner_join_rels = *qualscope;
+
+ /*
+ * Try to process any quals postponed by children. If they need
+ * further postponement, add them to my output postponed_qual_list.
+ */
+ foreach(l, child_postponed_quals)
+ {
+ PostponedQual *pq = (PostponedQual *) lfirst(l);
+
+ if (bms_is_subset(pq->relids, *qualscope))
+ distribute_qual_to_rels(root, pq->qual,
+ below_outer_join, JOIN_INNER,
+ root->qual_security_level,
+ *qualscope, NULL, NULL,
+ NULL);
+ else
+ *postponed_qual_list = lappend(*postponed_qual_list, pq);
+ }
+
+ /*
+ * Now process the top-level quals.
+ */
+ foreach(l, (List *) f->quals)
+ {
+ Node *qual = (Node *) lfirst(l);
+
+ distribute_qual_to_rels(root, qual,
+ below_outer_join, JOIN_INNER,
+ root->qual_security_level,
+ *qualscope, NULL, NULL,
+ postponed_qual_list);
+ }
+ }
+ else if (IsA(jtnode, JoinExpr))
+ {
+ JoinExpr *j = (JoinExpr *) jtnode;
+ List *child_postponed_quals = NIL;
+ Relids leftids,
+ rightids,
+ left_inners,
+ right_inners,
+ nonnullable_rels,
+ nullable_rels,
+ ojscope;
+ List *leftjoinlist,
+ *rightjoinlist;
+ List *my_quals;
+ SpecialJoinInfo *sjinfo;
+ ListCell *l;
+
+ /*
+ * Order of operations here is subtle and critical. First we recurse
+ * to handle sub-JOINs. Their join quals will be placed without
+ * regard for whether this level is an outer join, which is correct.
+ * Then we place our own join quals, which are restricted by lower
+ * outer joins in any case, and are forced to this level if this is an
+ * outer join and they mention the outer side. Finally, if this is an
+ * outer join, we create a join_info_list entry for the join. This
+ * will prevent quals above us in the join tree that use those rels
+ * from being pushed down below this level. (It's okay for upper
+ * quals to be pushed down to the outer side, however.)
+ */
+ switch (j->jointype)
+ {
+ case JOIN_INNER:
+ leftjoinlist = deconstruct_recurse(root, j->larg,
+ below_outer_join,
+ &leftids, &left_inners,
+ &child_postponed_quals);
+ rightjoinlist = deconstruct_recurse(root, j->rarg,
+ below_outer_join,
+ &rightids, &right_inners,
+ &child_postponed_quals);
+ *qualscope = bms_union(leftids, rightids);
+ *inner_join_rels = *qualscope;
+ /* Inner join adds no restrictions for quals */
+ nonnullable_rels = NULL;
+ /* and it doesn't force anything to null, either */
+ nullable_rels = NULL;
+ break;
+ case JOIN_LEFT:
+ case JOIN_ANTI:
+ leftjoinlist = deconstruct_recurse(root, j->larg,
+ below_outer_join,
+ &leftids, &left_inners,
+ &child_postponed_quals);
+ rightjoinlist = deconstruct_recurse(root, j->rarg,
+ true,
+ &rightids, &right_inners,
+ &child_postponed_quals);
+ *qualscope = bms_union(leftids, rightids);
+ *inner_join_rels = bms_union(left_inners, right_inners);
+ nonnullable_rels = leftids;
+ nullable_rels = rightids;
+ break;
+ case JOIN_SEMI:
+ leftjoinlist = deconstruct_recurse(root, j->larg,
+ below_outer_join,
+ &leftids, &left_inners,
+ &child_postponed_quals);
+ rightjoinlist = deconstruct_recurse(root, j->rarg,
+ below_outer_join,
+ &rightids, &right_inners,
+ &child_postponed_quals);
+ *qualscope = bms_union(leftids, rightids);
+ *inner_join_rels = bms_union(left_inners, right_inners);
+ /* Semi join adds no restrictions for quals */
+ nonnullable_rels = NULL;
+
+ /*
+ * Theoretically, a semijoin would null the RHS; but since the
+ * RHS can't be accessed above the join, this is immaterial
+ * and we needn't account for it.
+ */
+ nullable_rels = NULL;
+ break;
+ case JOIN_FULL:
+ leftjoinlist = deconstruct_recurse(root, j->larg,
+ true,
+ &leftids, &left_inners,
+ &child_postponed_quals);
+ rightjoinlist = deconstruct_recurse(root, j->rarg,
+ true,
+ &rightids, &right_inners,
+ &child_postponed_quals);
+ *qualscope = bms_union(leftids, rightids);
+ *inner_join_rels = bms_union(left_inners, right_inners);
+ /* each side is both outer and inner */
+ nonnullable_rels = *qualscope;
+ nullable_rels = *qualscope;
+ break;
+ default:
+ /* JOIN_RIGHT was eliminated during reduce_outer_joins() */
+ elog(ERROR, "unrecognized join type: %d",
+ (int) j->jointype);
+ nonnullable_rels = NULL; /* keep compiler quiet */
+ nullable_rels = NULL;
+ leftjoinlist = rightjoinlist = NIL;
+ break;
+ }
+
+ /* Report all rels that will be nulled anywhere in the jointree */
+ root->nullable_baserels = bms_add_members(root->nullable_baserels,
+ nullable_rels);
+
+ /*
+ * Try to process any quals postponed by children. If they need
+ * further postponement, add them to my output postponed_qual_list.
+ * Quals that can be processed now must be included in my_quals, so
+ * that they'll be handled properly in make_outerjoininfo.
+ */
+ my_quals = NIL;
+ foreach(l, child_postponed_quals)
+ {
+ PostponedQual *pq = (PostponedQual *) lfirst(l);
+
+ if (bms_is_subset(pq->relids, *qualscope))
+ my_quals = lappend(my_quals, pq->qual);
+ else
+ {
+ /*
+ * We should not be postponing any quals past an outer join.
+ * If this Assert fires, pull_up_subqueries() messed up.
+ */
+ Assert(j->jointype == JOIN_INNER);
+ *postponed_qual_list = lappend(*postponed_qual_list, pq);
+ }
+ }
+ my_quals = list_concat(my_quals, (List *) j->quals);
+
+ /*
+ * For an OJ, form the SpecialJoinInfo now, because we need the OJ's
+ * semantic scope (ojscope) to pass to distribute_qual_to_rels. But
+ * we mustn't add it to join_info_list just yet, because we don't want
+ * distribute_qual_to_rels to think it is an outer join below us.
+ *
+ * Semijoins are a bit of a hybrid: we build a SpecialJoinInfo, but we
+ * want ojscope = NULL for distribute_qual_to_rels.
+ */
+ if (j->jointype != JOIN_INNER)
+ {
+ sjinfo = make_outerjoininfo(root,
+ leftids, rightids,
+ *inner_join_rels,
+ j->jointype,
+ my_quals);
+ if (j->jointype == JOIN_SEMI)
+ ojscope = NULL;
+ else
+ ojscope = bms_union(sjinfo->min_lefthand,
+ sjinfo->min_righthand);
+ }
+ else
+ {
+ sjinfo = NULL;
+ ojscope = NULL;
+ }
+
+ /* Process the JOIN's qual clauses */
+ foreach(l, my_quals)
+ {
+ Node *qual = (Node *) lfirst(l);
+
+ distribute_qual_to_rels(root, qual,
+ below_outer_join, j->jointype,
+ root->qual_security_level,
+ *qualscope,
+ ojscope, nonnullable_rels,
+ postponed_qual_list);
+ }
+
+ /* Now we can add the SpecialJoinInfo to join_info_list */
+ if (sjinfo)
+ {
+ root->join_info_list = lappend(root->join_info_list, sjinfo);
+ /* Each time we do that, recheck placeholder eval levels */
+ update_placeholder_eval_levels(root, sjinfo);
+ }
+
+ /*
+ * Finally, compute the output joinlist. We fold subproblems together
+ * except at a FULL JOIN or where join_collapse_limit would be
+ * exceeded.
+ */
+ if (j->jointype == JOIN_FULL)
+ {
+ /* force the join order exactly at this node */
+ joinlist = list_make1(list_make2(leftjoinlist, rightjoinlist));
+ }
+ else if (list_length(leftjoinlist) + list_length(rightjoinlist) <=
+ join_collapse_limit)
+ {
+ /* OK to combine subproblems */
+ joinlist = list_concat(leftjoinlist, rightjoinlist);
+ }
+ else
+ {
+ /* can't combine, but needn't force join order above here */
+ Node *leftpart,
+ *rightpart;
+
+ /* avoid creating useless 1-element sublists */
+ if (list_length(leftjoinlist) == 1)
+ leftpart = (Node *) linitial(leftjoinlist);
+ else
+ leftpart = (Node *) leftjoinlist;
+ if (list_length(rightjoinlist) == 1)
+ rightpart = (Node *) linitial(rightjoinlist);
+ else
+ rightpart = (Node *) rightjoinlist;
+ joinlist = list_make2(leftpart, rightpart);
+ }
+ }
+ else
+ {
+ elog(ERROR, "unrecognized node type: %d",
+ (int) nodeTag(jtnode));
+ joinlist = NIL; /* keep compiler quiet */
+ }
+ return joinlist;
+}
+
+/*
+ * process_security_barrier_quals
+ * Transfer security-barrier quals into relation's baserestrictinfo list.
+ *
+ * The rewriter put any relevant security-barrier conditions into the RTE's
+ * securityQuals field, but it's now time to copy them into the rel's
+ * baserestrictinfo.
+ *
+ * In inheritance cases, we only consider quals attached to the parent rel
+ * here; they will be valid for all children too, so it's okay to consider
+ * them for purposes like equivalence class creation. Quals attached to
+ * individual child rels will be dealt with during path creation.
+ */
+static void
+process_security_barrier_quals(PlannerInfo *root,
+ int rti, Relids qualscope,
+ bool below_outer_join)
+{
+ RangeTblEntry *rte = root->simple_rte_array[rti];
+ Index security_level = 0;
+ ListCell *lc;
+
+ /*
+ * Each element of the securityQuals list has been preprocessed into an
+ * implicitly-ANDed list of clauses. All the clauses in a given sublist
+ * should get the same security level, but successive sublists get higher
+ * levels.
+ */
+ foreach(lc, rte->securityQuals)
+ {
+ List *qualset = (List *) lfirst(lc);
+ ListCell *lc2;
+
+ foreach(lc2, qualset)
+ {
+ Node *qual = (Node *) lfirst(lc2);
+
+ /*
+ * We cheat to the extent of passing ojscope = qualscope rather
+ * than its more logical value of NULL. The only effect this has
+ * is to force a Var-free qual to be evaluated at the rel rather
+ * than being pushed up to the top of the tree, which we don't want.
+ */
+ distribute_qual_to_rels(root, qual,
+ below_outer_join,
+ JOIN_INNER,
+ security_level,
+ qualscope,
+ qualscope,
+ NULL,
+ NULL);
+ }
+ security_level++;
+ }
+
+ /* Assert that qual_security_level is higher than anything we just used */
+ Assert(security_level <= root->qual_security_level);
+}
+
+/*
+ * make_outerjoininfo
+ * Build a SpecialJoinInfo for the current outer join
+ *
+ * Inputs:
+ * left_rels: the base Relids syntactically on outer side of join
+ * right_rels: the base Relids syntactically on inner side of join
+ * inner_join_rels: base Relids participating in inner joins below this one
+ * jointype: what it says (must always be LEFT, FULL, SEMI, or ANTI)
+ * clause: the outer join's join condition (in implicit-AND format)
+ *
+ * The node should eventually be appended to root->join_info_list, but we
+ * do not do that here.
+ *
+ * Note: we assume that this function is invoked bottom-up, so that
+ * root->join_info_list already contains entries for all outer joins that are
+ * syntactically below this one.
+ */
+static SpecialJoinInfo *
+make_outerjoininfo(PlannerInfo *root,
+ Relids left_rels, Relids right_rels,
+ Relids inner_join_rels,
+ JoinType jointype, List *clause)
+{
+ SpecialJoinInfo *sjinfo = makeNode(SpecialJoinInfo);
+ Relids clause_relids;
+ Relids strict_relids;
+ Relids min_lefthand;
+ Relids min_righthand;
+ ListCell *l;
+
+ /*
+ * We should not see RIGHT JOIN here because left/right were switched
+ * earlier
+ */
+ Assert(jointype != JOIN_INNER);
+ Assert(jointype != JOIN_RIGHT);
+
+ /*
+ * Presently the executor cannot support FOR [KEY] UPDATE/SHARE marking of
+ * rels appearing on the nullable side of an outer join. (It's somewhat
+ * unclear what that would mean, anyway: what should we mark when a result
+ * row is generated from no element of the nullable relation?) So,
+ * complain if any nullable rel is FOR [KEY] UPDATE/SHARE.
+ *
+ * You might be wondering why this test isn't made far upstream in the
+ * parser. It's because the parser hasn't got enough info --- consider
+ * FOR UPDATE applied to a view. Only after rewriting and flattening do
+ * we know whether the view contains an outer join.
+ *
+ * We use the original RowMarkClause list here; the PlanRowMark list would
+ * list everything.
+ */
+ foreach(l, root->parse->rowMarks)
+ {
+ RowMarkClause *rc = (RowMarkClause *) lfirst(l);
+
+ if (bms_is_member(rc->rti, right_rels) ||
+ (jointype == JOIN_FULL && bms_is_member(rc->rti, left_rels)))
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ /*------
+ translator: %s is a SQL row locking clause such as FOR UPDATE */
+ errmsg("%s cannot be applied to the nullable side of an outer join",
+ LCS_asString(rc->strength))));
+ }
+
+ sjinfo->syn_lefthand = left_rels;
+ sjinfo->syn_righthand = right_rels;
+ sjinfo->jointype = jointype;
+ /* this always starts out false */
+ sjinfo->delay_upper_joins = false;
+
+ compute_semijoin_info(root, sjinfo, clause);
+
+ /* If it's a full join, no need to be very smart */
+ if (jointype == JOIN_FULL)
+ {
+ sjinfo->min_lefthand = bms_copy(left_rels);
+ sjinfo->min_righthand = bms_copy(right_rels);
+ sjinfo->lhs_strict = false; /* don't care about this */
+ return sjinfo;
+ }
+
+ /*
+ * Retrieve all relids mentioned within the join clause.
+ */
+ clause_relids = pull_varnos(root, (Node *) clause);
+
+ /*
+ * For which relids is the clause strict, ie, it cannot succeed if the
+ * rel's columns are all NULL?
+ */
+ strict_relids = find_nonnullable_rels((Node *) clause);
+
+ /* Remember whether the clause is strict for any LHS relations */
+ sjinfo->lhs_strict = bms_overlap(strict_relids, left_rels);
+
+ /*
+ * Required LHS always includes the LHS rels mentioned in the clause. We
+ * may have to add more rels based on lower outer joins; see below.
+ */
+ min_lefthand = bms_intersect(clause_relids, left_rels);
+
+ /*
+ * Similarly for required RHS. But here, we must also include any lower
+ * inner joins, to ensure we don't try to commute with any of them.
+ */
+ min_righthand = bms_int_members(bms_union(clause_relids, inner_join_rels),
+ right_rels);
+
+ /*
+ * Now check previous outer joins for ordering restrictions.
+ */
+ foreach(l, root->join_info_list)
+ {
+ SpecialJoinInfo *otherinfo = (SpecialJoinInfo *) lfirst(l);
+
+ /*
+ * A full join is an optimization barrier: we can't associate into or
+ * out of it. Hence, if it overlaps either LHS or RHS of the current
+ * rel, expand that side's min relset to cover the whole full join.
+ */
+ if (otherinfo->jointype == JOIN_FULL)
+ {
+ if (bms_overlap(left_rels, otherinfo->syn_lefthand) ||
+ bms_overlap(left_rels, otherinfo->syn_righthand))
+ {
+ min_lefthand = bms_add_members(min_lefthand,
+ otherinfo->syn_lefthand);
+ min_lefthand = bms_add_members(min_lefthand,
+ otherinfo->syn_righthand);
+ }
+ if (bms_overlap(right_rels, otherinfo->syn_lefthand) ||
+ bms_overlap(right_rels, otherinfo->syn_righthand))
+ {
+ min_righthand = bms_add_members(min_righthand,
+ otherinfo->syn_lefthand);
+ min_righthand = bms_add_members(min_righthand,
+ otherinfo->syn_righthand);
+ }
+ /* Needn't do anything else with the full join */
+ continue;
+ }
+
+ /*
+ * For a lower OJ in our LHS, if our join condition uses the lower
+ * join's RHS and is not strict for that rel, we must preserve the
+ * ordering of the two OJs, so add lower OJ's full syntactic relset to
+ * min_lefthand. (We must use its full syntactic relset, not just its
+ * min_lefthand + min_righthand. This is because there might be other
+ * OJs below this one that this one can commute with, but we cannot
+ * commute with them if we don't commute with this one.) Also, if the current
+ * join is a semijoin or antijoin, we must preserve ordering
+ * regardless of strictness.
+ *
+ * Note: I believe we have to insist on being strict for at least one
+ * rel in the lower OJ's min_righthand, not its whole syn_righthand.
+ */
+ if (bms_overlap(left_rels, otherinfo->syn_righthand))
+ {
+ if (bms_overlap(clause_relids, otherinfo->syn_righthand) &&
+ (jointype == JOIN_SEMI || jointype == JOIN_ANTI ||
+ !bms_overlap(strict_relids, otherinfo->min_righthand)))
+ {
+ min_lefthand = bms_add_members(min_lefthand,
+ otherinfo->syn_lefthand);
+ min_lefthand = bms_add_members(min_lefthand,
+ otherinfo->syn_righthand);
+ }
+ }
+
+ /*
+ * For a lower OJ in our RHS, if our join condition does not use the
+ * lower join's RHS and the lower OJ's join condition is strict, we
+ * can interchange the ordering of the two OJs; otherwise we must add
+ * the lower OJ's full syntactic relset to min_righthand.
+ *
+ * Also, if our join condition does not use the lower join's LHS
+ * either, force the ordering to be preserved. Otherwise we can end
+ * up with SpecialJoinInfos with identical min_righthands, which can
+ * confuse join_is_legal (see discussion in backend/optimizer/README).
+ *
+ * Also, we must preserve ordering anyway if either the current join
+ * or the lower OJ is either a semijoin or an antijoin.
+ *
+ * Here, we have to consider that "our join condition" includes any
+ * clauses that syntactically appeared above the lower OJ and below
+ * ours; those are equivalent to degenerate clauses in our OJ and must
+ * be treated as such. Such clauses obviously can't reference our
+ * LHS, and they must be non-strict for the lower OJ's RHS (else
+ * reduce_outer_joins would have reduced the lower OJ to a plain
+ * join). Hence the other ways in which we handle clauses within our
+ * join condition are not affected by them. The net effect is
+ * therefore sufficiently represented by the delay_upper_joins flag
+ * saved for us by check_outerjoin_delay.
+ */
+ if (bms_overlap(right_rels, otherinfo->syn_righthand))
+ {
+ if (bms_overlap(clause_relids, otherinfo->syn_righthand) ||
+ !bms_overlap(clause_relids, otherinfo->min_lefthand) ||
+ jointype == JOIN_SEMI ||
+ jointype == JOIN_ANTI ||
+ otherinfo->jointype == JOIN_SEMI ||
+ otherinfo->jointype == JOIN_ANTI ||
+ !otherinfo->lhs_strict || otherinfo->delay_upper_joins)
+ {
+ min_righthand = bms_add_members(min_righthand,
+ otherinfo->syn_lefthand);
+ min_righthand = bms_add_members(min_righthand,
+ otherinfo->syn_righthand);
+ }
+ }
+ }
+
+ /*
+ * Examine PlaceHolderVars. If a PHV is supposed to be evaluated within
+ * this join's nullable side, then ensure that min_righthand contains the
+ * full eval_at set of the PHV. This ensures that the PHV actually can be
+ * evaluated within the RHS. Note that this works only because we should
+ * already have determined the final eval_at level for any PHV
+ * syntactically within this join.
+ */
+ foreach(l, root->placeholder_list)
+ {
+ PlaceHolderInfo *phinfo = (PlaceHolderInfo *) lfirst(l);
+ Relids ph_syn_level = phinfo->ph_var->phrels;
+
+ /* Ignore placeholder if it didn't syntactically come from RHS */
+ if (!bms_is_subset(ph_syn_level, right_rels))
+ continue;
+
+ /* Else, prevent join from being formed before we eval the PHV */
+ min_righthand = bms_add_members(min_righthand, phinfo->ph_eval_at);
+ }
+
+ /*
+ * If we found nothing to put in min_lefthand, punt and make it the full
+ * LHS, to avoid having an empty min_lefthand which will confuse later
+ * processing. (We don't try to be smart about such cases, just correct.)
+ * Likewise for min_righthand.
+ */
+ if (bms_is_empty(min_lefthand))
+ min_lefthand = bms_copy(left_rels);
+ if (bms_is_empty(min_righthand))
+ min_righthand = bms_copy(right_rels);
+
+ /* Now they'd better be nonempty */
+ Assert(!bms_is_empty(min_lefthand));
+ Assert(!bms_is_empty(min_righthand));
+ /* Shouldn't overlap either */
+ Assert(!bms_overlap(min_lefthand, min_righthand));
+
+ sjinfo->min_lefthand = min_lefthand;
+ sjinfo->min_righthand = min_righthand;
+
+ return sjinfo;
+}
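+
+/*
+ * Illustrative example (editorial addition, not part of the upstream file):
+ * for a lone "a LEFT JOIN b ON a.x = b.y" with no other special joins,
+ * lower inner joins, or placeholders involved, clause_relids is {a, b}, the
+ * clause is strict for both sides, and none of the loops above add
+ * anything, so the result is simply min_lefthand = {a} and
+ * min_righthand = {b} with lhs_strict = true.
+ */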
+
+/*
+ * compute_semijoin_info
+ * Fill semijoin-related fields of a new SpecialJoinInfo
+ *
+ * Note: this relies on only the jointype and syn_righthand fields of the
+ * SpecialJoinInfo; the rest may not be set yet.
+ */
+static void
+compute_semijoin_info(PlannerInfo *root, SpecialJoinInfo *sjinfo, List *clause)
+{
+ List *semi_operators;
+ List *semi_rhs_exprs;
+ bool all_btree;
+ bool all_hash;
+ ListCell *lc;
+
+ /* Initialize semijoin-related fields in case we can't unique-ify */
+ sjinfo->semi_can_btree = false;
+ sjinfo->semi_can_hash = false;
+ sjinfo->semi_operators = NIL;
+ sjinfo->semi_rhs_exprs = NIL;
+
+ /* Nothing more to do if it's not a semijoin */
+ if (sjinfo->jointype != JOIN_SEMI)
+ return;
+
+ /*
+ * Look to see whether the semijoin's join quals consist of AND'ed
+ * equality operators, with (only) RHS variables on only one side of each
+ * one. If so, we can figure out how to enforce uniqueness for the RHS.
+ *
+ * Note that the input clause list is the list of quals that are
+ * *syntactically* associated with the semijoin, which in practice means
+ * the synthesized comparison list for an IN or the WHERE of an EXISTS.
+ * Particularly in the latter case, it might contain clauses that aren't
+ * *semantically* associated with the join, but refer to just one side or
+ * the other. We can ignore such clauses here, as they will just drop
+ * down to be processed within one side or the other. (It is okay to
+ * consider only the syntactically-associated clauses here because for a
+ * semijoin, no higher-level quals could refer to the RHS, and so there
+ * can be no other quals that are semantically associated with this join.
+ * We do things this way because it is useful to have the set of potential
+ * unique-ification expressions before we can extract the list of quals
+ * that are actually semantically associated with the particular join.)
+ *
+ * Note that the semi_operators list consists of the joinqual operators
+ * themselves (but commuted if needed to put the RHS value on the right).
+ * These could be cross-type operators, in which case the operator
+ * actually needed for uniqueness is a related single-type operator. We
+ * assume here that that operator will be available from the btree or hash
+ * opclass when the time comes ... if not, create_unique_plan() will fail.
+ */
+ semi_operators = NIL;
+ semi_rhs_exprs = NIL;
+ all_btree = true;
+ all_hash = enable_hashagg; /* don't consider hash if not enabled */
+ foreach(lc, clause)
+ {
+ OpExpr *op = (OpExpr *) lfirst(lc);
+ Oid opno;
+ Node *left_expr;
+ Node *right_expr;
+ Relids left_varnos;
+ Relids right_varnos;
+ Relids all_varnos;
+ Oid opinputtype;
+
+ /* Is it a binary opclause? */
+ if (!IsA(op, OpExpr) ||
+ list_length(op->args) != 2)
+ {
+ /* No, but does it reference both sides? */
+ all_varnos = pull_varnos(root, (Node *) op);
+ if (!bms_overlap(all_varnos, sjinfo->syn_righthand) ||
+ bms_is_subset(all_varnos, sjinfo->syn_righthand))
+ {
+ /*
+ * Clause refers to only one rel, so ignore it --- unless it
+ * contains volatile functions, in which case we'd better
+ * punt.
+ */
+ if (contain_volatile_functions((Node *) op))
+ return;
+ continue;
+ }
+ /* Non-operator clause referencing both sides, must punt */
+ return;
+ }
+
+ /* Extract data from binary opclause */
+ opno = op->opno;
+ left_expr = linitial(op->args);
+ right_expr = lsecond(op->args);
+ left_varnos = pull_varnos(root, left_expr);
+ right_varnos = pull_varnos(root, right_expr);
+ all_varnos = bms_union(left_varnos, right_varnos);
+ opinputtype = exprType(left_expr);
+
+ /* Does it reference both sides? */
+ if (!bms_overlap(all_varnos, sjinfo->syn_righthand) ||
+ bms_is_subset(all_varnos, sjinfo->syn_righthand))
+ {
+ /*
+ * Clause refers to only one rel, so ignore it --- unless it
+ * contains volatile functions, in which case we'd better punt.
+ */
+ if (contain_volatile_functions((Node *) op))
+ return;
+ continue;
+ }
+
+ /* check rel membership of arguments */
+ if (!bms_is_empty(right_varnos) &&
+ bms_is_subset(right_varnos, sjinfo->syn_righthand) &&
+ !bms_overlap(left_varnos, sjinfo->syn_righthand))
+ {
+ /* typical case, right_expr is RHS variable */
+ }
+ else if (!bms_is_empty(left_varnos) &&
+ bms_is_subset(left_varnos, sjinfo->syn_righthand) &&
+ !bms_overlap(right_varnos, sjinfo->syn_righthand))
+ {
+ /* flipped case, left_expr is RHS variable */
+ opno = get_commutator(opno);
+ if (!OidIsValid(opno))
+ return;
+ right_expr = left_expr;
+ }
+ else
+ {
+ /* mixed membership of args, punt */
+ return;
+ }
+
+ /* all operators must be btree equality or hash equality */
+ if (all_btree)
+ {
+ /* oprcanmerge is considered a hint... */
+ if (!op_mergejoinable(opno, opinputtype) ||
+ get_mergejoin_opfamilies(opno) == NIL)
+ all_btree = false;
+ }
+ if (all_hash)
+ {
+ /* ... but oprcanhash had better be correct */
+ if (!op_hashjoinable(opno, opinputtype))
+ all_hash = false;
+ }
+ if (!(all_btree || all_hash))
+ return;
+
+ /* so far so good, keep building lists */
+ semi_operators = lappend_oid(semi_operators, opno);
+ semi_rhs_exprs = lappend(semi_rhs_exprs, copyObject(right_expr));
+ }
+
+ /* Punt if we didn't find at least one column to unique-ify */
+ if (semi_rhs_exprs == NIL)
+ return;
+
+ /*
+ * The expressions we'd need to unique-ify mustn't be volatile.
+ */
+ if (contain_volatile_functions((Node *) semi_rhs_exprs))
+ return;
+
+ /*
+ * If we get here, we can unique-ify the semijoin's RHS using at least one
+ * of sorting and hashing. Save the information about how to do that.
+ */
+ sjinfo->semi_can_btree = all_btree;
+ sjinfo->semi_can_hash = all_hash;
+ sjinfo->semi_operators = semi_operators;
+ sjinfo->semi_rhs_exprs = semi_rhs_exprs;
+}
+
+
+/*****************************************************************************
+ *
+ * QUALIFICATIONS
+ *
+ *****************************************************************************/
+
+/*
+ * distribute_qual_to_rels
+ * Add clause information to either the baserestrictinfo or joininfo list
+ * (depending on whether the clause is a join) of each base relation
+ * mentioned in the clause. A RestrictInfo node is created and added to
+ * the appropriate list for each rel. Alternatively, if the clause uses a
+ * mergejoinable operator and is not delayed by outer-join rules, enter
+ * the left- and right-side expressions into the query's list of
+ * EquivalenceClasses. Alternatively, if the clause needs to be treated
+ * as belonging to a higher join level, just add it to postponed_qual_list.
+ *
+ * 'clause': the qual clause to be distributed
+ * 'below_outer_join': true if the qual is from a JOIN/ON that is below the
+ * nullable side of a higher-level outer join
+ * 'jointype': type of join the qual is from (JOIN_INNER for a WHERE clause)
+ * 'security_level': security_level to assign to the qual
+ * 'qualscope': set of baserels the qual's syntactic scope covers
+ * 'ojscope': NULL if not an outer-join qual, else the minimum set of baserels
+ * needed to form this join
+ * 'outerjoin_nonnullable': NULL if not an outer-join qual, else the set of
+ * baserels appearing on the outer (nonnullable) side of the join
+ * (for FULL JOIN this includes both sides of the join, and must in fact
+ * equal qualscope)
+ * 'postponed_qual_list': list of PostponedQual structs, which we can add
+ * this qual to if it turns out to belong to a higher join level.
+ * Can be NULL if caller knows postponement is impossible.
+ *
+ * 'qualscope' identifies what level of JOIN the qual came from syntactically.
+ * 'ojscope' is needed if we decide to force the qual up to the outer-join
+ * level; in that case the qual is evaluated at ojscope, which need not equal
+ * qualscope.
+ *
+ * At the time this is called, root->join_info_list must contain entries for
+ * all and only those special joins that are syntactically below this qual.
+ */
+static void
+distribute_qual_to_rels(PlannerInfo *root, Node *clause,
+ bool below_outer_join,
+ JoinType jointype,
+ Index security_level,
+ Relids qualscope,
+ Relids ojscope,
+ Relids outerjoin_nonnullable,
+ List **postponed_qual_list)
+{
+ Relids relids;
+ bool is_pushed_down;
+ bool outerjoin_delayed;
+ bool pseudoconstant = false;
+ bool maybe_equivalence;
+ bool maybe_outer_join;
+ Relids nullable_relids;
+ RestrictInfo *restrictinfo;
+
+ /*
+ * Retrieve all relids mentioned within the clause.
+ */
+ relids = pull_varnos(root, clause);
+
+ /*
+ * In ordinary SQL, a WHERE or JOIN/ON clause can't reference any rels
+ * that aren't within its syntactic scope; however, if we pulled up a
+ * LATERAL subquery then we might find such references in quals that have
+ * been pulled up. We need to treat such quals as belonging to the join
+ * level that includes every rel they reference. Although we could make
+ * pull_up_subqueries() place such quals correctly to begin with, it's
+ * easier to handle it here. When we find a clause that contains Vars
+ * outside its syntactic scope, we add it to the postponed-quals list, and
+ * process it once we've recursed back up to the appropriate join level.
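+ *
+ * For example, in
+ *     SELECT * FROM a, LATERAL (SELECT * FROM b WHERE b.y = a.x) ss
+ * flattening the subquery leaves b.y = a.x syntactically inside the former
+ * subquery's scope even though it also references a; we postpone it here
+ * and distribute it once we reach the join level covering both a and b.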
+ */
+ if (!bms_is_subset(relids, qualscope))
+ {
+ PostponedQual *pq = (PostponedQual *) palloc(sizeof(PostponedQual));
+
+ Assert(root->hasLateralRTEs); /* shouldn't happen otherwise */
+ Assert(jointype == JOIN_INNER); /* mustn't postpone past outer join */
+ pq->qual = clause;
+ pq->relids = relids;
+ *postponed_qual_list = lappend(*postponed_qual_list, pq);
+ return;
+ }
+
+ /*
+ * If it's an outer-join clause, also check that relids is a subset of
+ * ojscope. (This should not fail if the syntactic scope check passed.)
+ */
+ if (ojscope && !bms_is_subset(relids, ojscope))
+ elog(ERROR, "JOIN qualification cannot refer to other relations");
+
+ /*
+ * If the clause is variable-free, our normal heuristic for pushing it
+ * down to just the mentioned rels doesn't work, because there are none.
+ *
+ * If the clause is an outer-join clause, we must force it to the OJ's
+ * semantic level to preserve semantics.
+ *
+ * Otherwise, when the clause contains volatile functions, we force it to
+ * be evaluated at its original syntactic level. This preserves the
+ * expected semantics.
+ *
+ * When the clause contains no volatile functions either, it is actually a
+ * pseudoconstant clause that will not change value during any one
+ * execution of the plan, and hence can be used as a one-time qual in a
+ * gating Result plan node. We put such a clause into the regular
+ * RestrictInfo lists for the moment, but eventually createplan.c will
+ * pull it out and make a gating Result node immediately above whatever
+ * plan node the pseudoconstant clause is assigned to. It's usually best
+ * to put a gating node as high in the plan tree as possible. If we are
+ * not below an outer join, we can actually push the pseudoconstant qual
+ * all the way to the top of the tree. If we are below an outer join, we
+ * leave the qual at its original syntactic level (we could push it up to
+ * just below the outer join, but that seems more complex than it's
+ * worth).
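+ *
+ * For example, a variable-free, non-volatile clause such as "WHERE 1 = 0",
+ * or one referencing only outer-level Params, becomes such a gating qual:
+ * it is evaluated once per execution of the plan in a Result node rather
+ * than once per row.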
+ */
+ if (bms_is_empty(relids))
+ {
+ if (ojscope)
+ {
+ /* clause is attached to outer join, eval it there */
+ relids = bms_copy(ojscope);
+ /* mustn't use as gating qual, so don't mark pseudoconstant */
+ }
+ else
+ {
+ /* eval at original syntactic level */
+ relids = bms_copy(qualscope);
+ if (!contain_volatile_functions(clause))
+ {
+ /* mark as gating qual */
+ pseudoconstant = true;
+ /* tell createplan.c to check for gating quals */
+ root->hasPseudoConstantQuals = true;
+ /* if not below outer join, push it to top of tree */
+ if (!below_outer_join)
+ {
+ relids =
+ get_relids_in_jointree((Node *) root->parse->jointree,
+ false);
+ qualscope = bms_copy(relids);
+ }
+ }
+ }
+ }
+
+ /*----------
+ * Check to see if clause application must be delayed by outer-join
+ * considerations.
+ *
+ * A word about is_pushed_down: we mark the qual as "pushed down" if
+ * it is (potentially) applicable at a level different from its original
+ * syntactic level. This flag is used to distinguish OUTER JOIN ON quals
+ * from other quals pushed down to the same joinrel. The rules are:
+ * WHERE quals and INNER JOIN quals: is_pushed_down = true.
+ * Non-degenerate OUTER JOIN quals: is_pushed_down = false.
+ * Degenerate OUTER JOIN quals: is_pushed_down = true.
+ * A "degenerate" OUTER JOIN qual is one that doesn't mention the
+ * non-nullable side, and hence can be pushed down into the nullable side
+ * without changing the join result. It is correct to treat it as a
+ * regular filter condition at the level where it is evaluated.
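+ * For example, in "a LEFT JOIN b ON a.x = b.y AND b.z = 1" the clause
+ * b.z = 1 mentions only the nullable side, so it is degenerate: evaluating
+ * it as a plain restriction on b leaves exactly the same set of a-rows
+ * null-extended.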
+ *
+ * Note: it is not immediately obvious that a simple boolean is enough
+ * for this: if for some reason we were to attach a degenerate qual to
+ * its original join level, it would need to be treated as an outer join
+ * qual there. However, this cannot happen, because all the rels the
+ * clause mentions must be in the outer join's min_righthand, therefore
+ * the join it needs must be formed before the outer join; and we always
+ * attach quals to the lowest level where they can be evaluated. But
+ * if we were ever to re-introduce a mechanism for delaying evaluation
+ * of "expensive" quals, this area would need work.
+ *
+ * Note: generally, use of is_pushed_down has to go through the macro
+ * RINFO_IS_PUSHED_DOWN, because that flag alone is not always sufficient
+ * to tell whether a clause must be treated as pushed-down in context.
+ * This seems like another reason why it should perhaps be rethought.
+ *----------
+ */
+ if (bms_overlap(relids, outerjoin_nonnullable))
+ {
+ /*
+ * The qual is attached to an outer join and mentions (some of the)
+ * rels on the nonnullable side, so it's not degenerate.
+ *
+ * We can't use such a clause to deduce equivalence (the left and
+ * right sides might be unequal above the join because one of them has
+ * gone to NULL) ... but we might be able to use it for more limited
+ * deductions, if it is mergejoinable. So consider adding it to the
+ * lists of set-aside outer-join clauses.
+ */
+ is_pushed_down = false;
+ maybe_equivalence = false;
+ maybe_outer_join = true;
+
+ /* Check to see if must be delayed by lower outer join */
+ outerjoin_delayed = check_outerjoin_delay(root,
+ &relids,
+ &nullable_relids,
+ false);
+
+ /*
+ * Now force the qual to be evaluated exactly at the level of joining
+ * corresponding to the outer join. We cannot let it get pushed down
+ * into the nonnullable side, since then we'd produce no output rows,
+ * rather than the intended single null-extended row, for any
+ * nonnullable-side rows failing the qual.
+ *
+ * (Do this step after calling check_outerjoin_delay, because that
+ * trashes relids.)
+ */
+ Assert(ojscope);
+ relids = ojscope;
+ Assert(!pseudoconstant);
+ }
+ else
+ {
+ /*
+ * Normal qual clause or degenerate outer-join clause. Either way, we
+ * can mark it as pushed-down.
+ */
+ is_pushed_down = true;
+
+ /* Check to see if must be delayed by lower outer join */
+ outerjoin_delayed = check_outerjoin_delay(root,
+ &relids,
+ &nullable_relids,
+ true);
+
+ if (outerjoin_delayed)
+ {
+ /* Should still be a subset of current scope ... */
+ Assert(root->hasLateralRTEs || bms_is_subset(relids, qualscope));
+ Assert(ojscope == NULL || bms_is_subset(relids, ojscope));
+
+ /*
+ * Because application of the qual will be delayed by outer join,
+ * we mustn't assume its vars are equal everywhere.
+ */
+ maybe_equivalence = false;
+
+ /*
+ * It's possible that this is an IS NULL clause that's redundant
+ * with a lower antijoin; if so we can just discard it. We need
+ * not test in any of the other cases, because this will only be
+ * possible for pushed-down, delayed clauses.
+ */
+ if (check_redundant_nullability_qual(root, clause))
+ return;
+ }
+ else
+ {
+ /*
+ * Qual is not delayed by any lower outer-join restriction, so we
+ * can consider feeding it to the equivalence machinery. However,
+ * if it's itself within an outer-join clause, treat it as though
+ * it appeared below that outer join (note that we can only get
+ * here when the clause references only nullable-side rels).
+ */
+ maybe_equivalence = true;
+ if (outerjoin_nonnullable != NULL)
+ below_outer_join = true;
+ }
+
+ /*
+ * Since it doesn't mention the LHS, it's certainly not useful as a
+ * set-aside OJ clause, even if it's in an OJ.
+ */
+ maybe_outer_join = false;
+ }
+
+ /*
+ * Build the RestrictInfo node itself.
+ */
+ restrictinfo = make_restrictinfo(root,
+ (Expr *) clause,
+ is_pushed_down,
+ outerjoin_delayed,
+ pseudoconstant,
+ security_level,
+ relids,
+ outerjoin_nonnullable,
+ nullable_relids);
+
+ /*
+ * If it's a join clause (either naturally, or because delayed by
+ * outer-join rules), add vars used in the clause to targetlists of their
+ * relations, so that they will be emitted by the plan nodes that scan
+ * those relations (else they won't be available at the join node!).
+ *
+ * Note: if the clause gets absorbed into an EquivalenceClass then this
+ * may be unnecessary, but for now we have to do it to cover the case
+ * where the EC becomes ec_broken and we end up reinserting the original
+ * clauses into the plan.
+ */
+ if (bms_membership(relids) == BMS_MULTIPLE)
+ {
+ List *vars = pull_var_clause(clause,
+ PVC_RECURSE_AGGREGATES |
+ PVC_RECURSE_WINDOWFUNCS |
+ PVC_INCLUDE_PLACEHOLDERS);
+
+ add_vars_to_targetlist(root, vars, relids, false);
+ list_free(vars);
+ }
+
+ /*
+ * We check "mergejoinability" of every clause, not only join clauses,
+ * because we want to know about equivalences between vars of the same
+ * relation, or between vars and consts.
+ */
+ check_mergejoinable(restrictinfo);
+
+ /*
+ * If it is a true equivalence clause, send it to the EquivalenceClass
+ * machinery. We do *not* attach it directly to any restriction or join
+ * lists. The EC code will propagate it to the appropriate places later.
+ *
+ * If the clause has a mergejoinable operator and is not
+ * outerjoin-delayed, yet isn't an equivalence because it is an outer-join
+ * clause, the EC code may yet be able to do something with it. We add it
+ * to appropriate lists for further consideration later. Specifically:
+ *
+ * If it is a left or right outer-join qualification that relates the two
+ * sides of the outer join (no funny business like leftvar1 = leftvar2 +
+ * rightvar), we add it to root->left_join_clauses or
+ * root->right_join_clauses according to which side the nonnullable
+ * variable appears on.
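+ * For example, for "a LEFT JOIN b ON a.x = b.y", the clause a.x = b.y
+ * cannot become an equivalence (b.y may have gone to NULL above the join),
+ * but since the nonnullable variable a.x appears on the left, it is added
+ * to root->left_join_clauses.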
+ *
+ * If it is a full outer-join qualification, we add it to
+ * root->full_join_clauses. (Ideally we'd discard cases that aren't
+ * leftvar = rightvar, as we do for left/right joins, but this routine
+ * doesn't have the info needed to do that; and the current usage of the
+ * full_join_clauses list doesn't require that, so it's not currently
+ * worth complicating this routine's API to make it possible.)
+ *
+ * If none of the above hold, pass it off to
+ * distribute_restrictinfo_to_rels().
+ *
+ * In all cases, it's important to initialize the left_ec and right_ec
+ * fields of a mergejoinable clause, so that all possibly mergejoinable
+ * expressions have representations in EquivalenceClasses. If
+ * process_equivalence is successful, it will take care of that;
+ * otherwise, we have to call initialize_mergeclause_eclasses to do it.
+ */
+ if (restrictinfo->mergeopfamilies)
+ {
+ if (maybe_equivalence)
+ {
+ if (check_equivalence_delay(root, restrictinfo) &&
+ process_equivalence(root, &restrictinfo, below_outer_join))
+ return;
+ /* EC rejected it, so set left_ec/right_ec the hard way ... */
+ if (restrictinfo->mergeopfamilies) /* EC might have changed this */
+ initialize_mergeclause_eclasses(root, restrictinfo);
+ /* ... and fall through to distribute_restrictinfo_to_rels */
+ }
+ else if (maybe_outer_join && restrictinfo->can_join)
+ {
+ /* we need to set up left_ec/right_ec the hard way */
+ initialize_mergeclause_eclasses(root, restrictinfo);
+ /* now see if it should go to any outer-join lists */
+ if (bms_is_subset(restrictinfo->left_relids,
+ outerjoin_nonnullable) &&
+ !bms_overlap(restrictinfo->right_relids,
+ outerjoin_nonnullable))
+ {
+ /* we have outervar = innervar */
+ root->left_join_clauses = lappend(root->left_join_clauses,
+ restrictinfo);
+ return;
+ }
+ if (bms_is_subset(restrictinfo->right_relids,
+ outerjoin_nonnullable) &&
+ !bms_overlap(restrictinfo->left_relids,
+ outerjoin_nonnullable))
+ {
+ /* we have innervar = outervar */
+ root->right_join_clauses = lappend(root->right_join_clauses,
+ restrictinfo);
+ return;
+ }
+ if (jointype == JOIN_FULL)
+ {
+ /* FULL JOIN (above tests cannot match in this case) */
+ root->full_join_clauses = lappend(root->full_join_clauses,
+ restrictinfo);
+ return;
+ }
+ /* nope, so fall through to distribute_restrictinfo_to_rels */
+ }
+ else
+ {
+ /* we still need to set up left_ec/right_ec */
+ initialize_mergeclause_eclasses(root, restrictinfo);
+ }
+ }
+
+ /* No EC special case applies, so push it into the clause lists */
+ distribute_restrictinfo_to_rels(root, restrictinfo);
+}
+
+/*
+ * check_outerjoin_delay
+ * Detect whether a qual referencing the given relids must be delayed
+ * in application due to the presence of a lower outer join, and/or
+ * may force extra delay of higher-level outer joins.
+ *
+ * If the qual must be delayed, add relids to *relids_p to reflect the lowest
+ * safe level for evaluating the qual, and return true. Any extra delay for
+ * higher-level joins is reflected by setting delay_upper_joins to true in
+ * SpecialJoinInfo structs. We also compute nullable_relids, the set of
+ * referenced relids that are nullable by lower outer joins (note that this
+ * can be nonempty even for a non-delayed qual).
+ *
+ * For an is_pushed_down qual, we can evaluate the qual as soon as (1) we have
+ * all the rels it mentions, and (2) we are at or above any outer joins that
+ * can null any of these rels and are below the syntactic location of the
+ * given qual. We must enforce (2) because pushing down such a clause below
+ * the OJ might cause the OJ to emit null-extended rows that should not have
+ * been formed, or that should have been rejected by the clause. (This is
+ * only an issue for non-strict quals, since if we can prove a qual mentioning
+ * only nullable rels is strict, we'd have reduced the outer join to an inner
+ * join in reduce_outer_joins().)
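+ *
+ * For example, with "a LEFT JOIN b ON a.x = b.y" and an upper qual
+ * "COALESCE(b.z, 0) = 0", the qual mentions only b but is not strict;
+ * pushing it down to b's scan could cause a-rows to be null-extended that
+ * the original query would not have produced.  So we add a's relids, and
+ * the qual is evaluated at the {a,b} join level instead.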
+ *
+ * To enforce (2), scan the join_info_list and merge the required-relid sets of
+ * any such OJs into the clause's own reference list. At the time we are
+ * called, the join_info_list contains only outer joins below this qual. We
+ * have to repeat the scan until no new relids get added; this ensures that
+ * the qual is suitably delayed regardless of the order in which OJs get
+ * executed. As an example, if we have one OJ with LHS=A, RHS=B, and one with
+ * LHS=B, RHS=C, it is implied that these can be done in either order; if the
+ * B/C join is done first then the join to A can null C, so a qual actually
+ * mentioning only C cannot be applied below the join to A.
+ *
+ * For a non-pushed-down qual, this isn't going to determine where we place the
+ * qual, but we still need to compute outerjoin_delayed and nullable_relids for
+ * use later in the planning process.
+ *
+ * Lastly, a pushed-down qual that references the nullable side of any current
+ * join_info_list member and has to be evaluated above that OJ (because its
+ * required relids overlap the LHS too) causes that OJ's delay_upper_joins
+ * flag to be set true. This will prevent any higher-level OJs from
+ * being interchanged with that OJ, which would result in not having any
+ * correct place to evaluate the qual. (The case we care about here is a
+ * sub-select WHERE clause within the RHS of some outer join. The WHERE
+ * clause must effectively be treated as a degenerate clause of that outer
+ * join's condition. Rather than trying to match such clauses with joins
+ * directly, we set delay_upper_joins here, and when the upper outer join
+ * is processed by make_outerjoininfo, it will refrain from allowing the
+ * two OJs to commute.)
+ */
+static bool
+check_outerjoin_delay(PlannerInfo *root,
+ Relids *relids_p, /* in/out parameter */
+ Relids *nullable_relids_p, /* output parameter */
+ bool is_pushed_down)
+{
+ Relids relids;
+ Relids nullable_relids;
+ bool outerjoin_delayed;
+ bool found_some;
+
+ /* fast path if no special joins */
+ if (root->join_info_list == NIL)
+ {
+ *nullable_relids_p = NULL;
+ return false;
+ }
+
+ /* must copy relids because we need the original value at the end */
+ relids = bms_copy(*relids_p);
+ nullable_relids = NULL;
+ outerjoin_delayed = false;
+ do
+ {
+ ListCell *l;
+
+ found_some = false;
+ foreach(l, root->join_info_list)
+ {
+ SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) lfirst(l);
+
+ /* do we reference any nullable rels of this OJ? */
+ if (bms_overlap(relids, sjinfo->min_righthand) ||
+ (sjinfo->jointype == JOIN_FULL &&
+ bms_overlap(relids, sjinfo->min_lefthand)))
+ {
+ /* yes; have we included all its rels in relids? */
+ if (!bms_is_subset(sjinfo->min_lefthand, relids) ||
+ !bms_is_subset(sjinfo->min_righthand, relids))
+ {
+ /* no, so add them in */
+ relids = bms_add_members(relids, sjinfo->min_lefthand);
+ relids = bms_add_members(relids, sjinfo->min_righthand);
+ outerjoin_delayed = true;
+ /* we'll need another iteration */
+ found_some = true;
+ }
+ /* track all the nullable rels of relevant OJs */
+ nullable_relids = bms_add_members(nullable_relids,
+ sjinfo->min_righthand);
+ if (sjinfo->jointype == JOIN_FULL)
+ nullable_relids = bms_add_members(nullable_relids,
+ sjinfo->min_lefthand);
+ /* set delay_upper_joins if needed */
+ if (is_pushed_down && sjinfo->jointype != JOIN_FULL &&
+ bms_overlap(relids, sjinfo->min_lefthand))
+ sjinfo->delay_upper_joins = true;
+ }
+ }
+ } while (found_some);
+
+ /* identify just the actually-referenced nullable rels */
+ nullable_relids = bms_int_members(nullable_relids, *relids_p);
+
+ /* replace *relids_p, and return nullable_relids */
+ bms_free(*relids_p);
+ *relids_p = relids;
+ *nullable_relids_p = nullable_relids;
+ return outerjoin_delayed;
+}
+
+/*
+ * check_equivalence_delay
+ * Detect whether a potential equivalence clause is rendered unsafe
+ * by outer-join-delay considerations. Return true if it's safe.
+ *
+ * The initial tests in distribute_qual_to_rels will consider a mergejoinable
+ * clause to be a potential equivalence clause if it is not outerjoin_delayed.
+ * But since the point of equivalence processing is that we will recombine the
+ * two sides of the clause with others, we have to check that each side
+ * satisfies the not-outerjoin_delayed condition on its own; otherwise it might
+ * not be safe to evaluate everywhere we could place a derived equivalence
+ * condition.
+ */
+static bool
+check_equivalence_delay(PlannerInfo *root,
+ RestrictInfo *restrictinfo)
+{
+ Relids relids;
+ Relids nullable_relids;
+
+ /* fast path if no special joins */
+ if (root->join_info_list == NIL)
+ return true;
+
+ /* must copy restrictinfo's relids to avoid changing it */
+ relids = bms_copy(restrictinfo->left_relids);
+ /* check left side does not need delay */
+ if (check_outerjoin_delay(root, &relids, &nullable_relids, true))
+ return false;
+
+ /* and similarly for the right side */
+ relids = bms_copy(restrictinfo->right_relids);
+ if (check_outerjoin_delay(root, &relids, &nullable_relids, true))
+ return false;
+
+ return true;
+}
+
+/*
+ * check_redundant_nullability_qual
+ * Check to see if the qual is an IS NULL qual that is redundant with
+ * a lower JOIN_ANTI join.
+ *
+ * We want to suppress redundant IS NULL quals, not so much to save cycles
+ * as to avoid generating bogus selectivity estimates for them. So if
+ * redundancy is detected here, distribute_qual_to_rels() just throws away
+ * the qual.
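+ *
+ * The typical case arises when reduce_outer_joins() has converted, say,
+ * "a LEFT JOIN b ON a.x = b.y WHERE b.y IS NULL" into an antijoin: the
+ * surviving "b.y IS NULL" test is then necessarily true, and keeping it
+ * would only distort the selectivity estimates.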
+ */
+static bool
+check_redundant_nullability_qual(PlannerInfo *root, Node *clause)
+{
+ Var *forced_null_var;
+ Index forced_null_rel;
+ ListCell *lc;
+
+ /* Check for IS NULL, and identify the Var forced to NULL */
+ forced_null_var = find_forced_null_var(clause);
+ if (forced_null_var == NULL)
+ return false;
+ forced_null_rel = forced_null_var->varno;
+
+ /*
+ * If the Var comes from the nullable side of a lower antijoin, the IS
+ * NULL condition is necessarily true.
+ */
+ foreach(lc, root->join_info_list)
+ {
+ SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) lfirst(lc);
+
+ if (sjinfo->jointype == JOIN_ANTI &&
+ bms_is_member(forced_null_rel, sjinfo->syn_righthand))
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * distribute_restrictinfo_to_rels
+ * Push a completed RestrictInfo into the proper restriction or join
+ * clause list(s).
+ *
+ * This is the last step of distribute_qual_to_rels() for ordinary qual
+ * clauses. Clauses that are interesting for equivalence-class processing
+ * are diverted to the EC machinery, but may ultimately get fed back here.
+ */
+void
+distribute_restrictinfo_to_rels(PlannerInfo *root,
+ RestrictInfo *restrictinfo)
+{
+ Relids relids = restrictinfo->required_relids;
+ RelOptInfo *rel;
+
+ switch (bms_membership(relids))
+ {
+ case BMS_SINGLETON:
+
+ /*
+ * There is only one relation participating in the clause, so it
+ * is a restriction clause for that relation.
+ */
+ rel = find_base_rel(root, bms_singleton_member(relids));
+
+ /* Add clause to rel's restriction list */
+ rel->baserestrictinfo = lappend(rel->baserestrictinfo,
+ restrictinfo);
+ /* Update security level info */
+ rel->baserestrict_min_security = Min(rel->baserestrict_min_security,
+ restrictinfo->security_level);
+ break;
+ case BMS_MULTIPLE:
+
+ /*
+ * The clause is a join clause, since there is more than one rel
+ * in its relid set.
+ */
+
+ /*
+ * Check for hashjoinable operators. (We don't bother setting the
+ * hashjoin info except in true join clauses.)
+ */
+ check_hashjoinable(restrictinfo);
+
+ /*
+ * Likewise, check if the clause is suitable to be used with a
+ * Memoize node to cache inner tuples during a parameterized
+ * nested loop.
+ */
+ check_memoizable(restrictinfo);
+
+ /*
+ * Add clause to the join lists of all the relevant relations.
+ */
+ add_join_clause_to_rels(root, restrictinfo, relids);
+ break;
+ default:
+
+ /*
+ * clause references no rels, and therefore we have no place to
+ * attach it. Shouldn't get here if callers are working properly.
+ */
+ elog(ERROR, "cannot cope with variable-free clause");
+ break;
+ }
+}
+
+/*
+ * process_implied_equality
+ * Create a restrictinfo item that says "item1 op item2", and push it
+ * into the appropriate lists. (In practice opno is always a btree
+ * equality operator.)
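+ *
+ * For example, if an EquivalenceClass contains {a.x, b.y, 42}, the EC
+ * machinery uses this function to install the derived clauses a.x = 42 and
+ * b.y = 42 as base restrictions on a and b respectively.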
+ *
+ * "qualscope" is the nominal syntactic level to impute to the restrictinfo.
+ * This must contain at least all the rels used in the expressions, but it
+ * is used only to set the qual application level when both exprs are
+ * variable-free. Otherwise the qual is applied at the lowest join level
+ * that provides all its variables.
+ *
+ * "nullable_relids" is the set of relids used in the expressions that are
+ * potentially nullable below the expressions. (This has to be supplied by
+ * caller because this function is used after deconstruct_jointree, so we
+ * don't have knowledge of where the clause items came from.)
+ *
+ * "security_level" is the security level to assign to the new restrictinfo.
+ *
+ * "both_const" indicates whether both items are known pseudo-constant;
+ * in this case it is worth applying eval_const_expressions() in case we
+ * can produce constant TRUE or constant FALSE. (Otherwise it's not,
+ * because the expressions went through eval_const_expressions already.)
+ *
+ * Returns the generated RestrictInfo, if any. The result will be NULL
+ * if both_const is true and we successfully reduced the clause to
+ * constant TRUE.
+ *
+ * Note: this function will copy item1 and item2, but it is caller's
+ * responsibility to make sure that the Relids parameters are fresh copies
+ * not shared with other uses.
+ *
+ * Note: we do not do initialize_mergeclause_eclasses() here. It is
+ * caller's responsibility that left_ec/right_ec be set as necessary.
+ */
+RestrictInfo *
+process_implied_equality(PlannerInfo *root,
+ Oid opno,
+ Oid collation,
+ Expr *item1,
+ Expr *item2,
+ Relids qualscope,
+ Relids nullable_relids,
+ Index security_level,
+ bool below_outer_join,
+ bool both_const)
+{
+ RestrictInfo *restrictinfo;
+ Node *clause;
+ Relids relids;
+ bool pseudoconstant = false;
+
+ /*
+ * Build the new clause. Copy to ensure it shares no substructure with
+ * original (this is necessary in case there are subselects in there...)
+ */
+ clause = (Node *) make_opclause(opno,
+ BOOLOID, /* opresulttype */
+ false, /* opretset */
+ copyObject(item1),
+ copyObject(item2),
+ InvalidOid,
+ collation);
+
+ /* If both constant, try to reduce to a boolean constant. */
+ if (both_const)
+ {
+ clause = eval_const_expressions(root, clause);
+
+ /* If we produced const TRUE, just drop the clause */
+ if (clause && IsA(clause, Const))
+ {
+ Const *cclause = (Const *) clause;
+
+ Assert(cclause->consttype == BOOLOID);
+ if (!cclause->constisnull && DatumGetBool(cclause->constvalue))
+ return NULL;
+ }
+ }
+
+ /*
+ * The rest of this is a very cut-down version of distribute_qual_to_rels.
+ * We can skip most of the work therein, but there are a couple of special
+ * cases we still have to handle.
+ *
+ * Retrieve all relids mentioned within the possibly-simplified clause.
+ */
+ relids = pull_varnos(root, clause);
+ Assert(bms_is_subset(relids, qualscope));
+
+ /*
+ * If the clause is variable-free, our normal heuristic for pushing it
+ * down to just the mentioned rels doesn't work, because there are none.
+ * Apply at the given qualscope, or at the top of tree if it's nonvolatile
+ * (which it very likely is, but we'll check, just to be sure).
+ */
+ if (bms_is_empty(relids))
+ {
+ /* eval at original syntactic level */
+ relids = bms_copy(qualscope);
+ if (!contain_volatile_functions(clause))
+ {
+ /* mark as gating qual */
+ pseudoconstant = true;
+ /* tell createplan.c to check for gating quals */
+ root->hasPseudoConstantQuals = true;
+ /* if not below outer join, push it to top of tree */
+ if (!below_outer_join)
+ {
+ relids =
+ get_relids_in_jointree((Node *) root->parse->jointree,
+ false);
+ }
+ }
+ }
+
+ /*
+ * Build the RestrictInfo node itself.
+ */
+ restrictinfo = make_restrictinfo(root,
+ (Expr *) clause,
+ true, /* is_pushed_down */
+ false, /* outerjoin_delayed */
+ pseudoconstant,
+ security_level,
+ relids,
+ NULL, /* outer_relids */
+ nullable_relids);
+
+ /*
+ * If it's a join clause, add vars used in the clause to targetlists of
+ * their relations, so that they will be emitted by the plan nodes that
+ * scan those relations (else they won't be available at the join node!).
+ *
+ * Typically, we'd have already done this when the component expressions
+ * were first seen by distribute_qual_to_rels; but it is possible that
+ * some of the Vars could have missed having that done because they only
+ * appeared in single-relation clauses originally. So do it here for
+ * safety.
+ */
+ if (bms_membership(relids) == BMS_MULTIPLE)
+ {
+ List *vars = pull_var_clause(clause,
+ PVC_RECURSE_AGGREGATES |
+ PVC_RECURSE_WINDOWFUNCS |
+ PVC_INCLUDE_PLACEHOLDERS);
+
+ add_vars_to_targetlist(root, vars, relids, false);
+ list_free(vars);
+ }
+
+ /*
+ * Check mergejoinability. This will usually succeed, since the op came
+ * from an EquivalenceClass; but we could have reduced the original clause
+ * to a constant.
+ */
+ check_mergejoinable(restrictinfo);
+
+ /*
+ * Note we don't do initialize_mergeclause_eclasses(); the caller can
+ * handle that much more cheaply than we can. It's okay to call
+ * distribute_restrictinfo_to_rels() before that happens.
+ */
+
+ /*
+ * Push the new clause into all the appropriate restrictinfo lists.
+ */
+ distribute_restrictinfo_to_rels(root, restrictinfo);
+
+ return restrictinfo;
+}
+
+/*
+ * build_implied_join_equality --- build a RestrictInfo for a derived equality
+ *
+ * This overlaps the functionality of process_implied_equality(), but we
+ * must not push the RestrictInfo into the joininfo tree.
+ *
+ * Note: this function will copy item1 and item2, but it is caller's
+ * responsibility to make sure that the Relids parameters are fresh copies
+ * not shared with other uses.
+ *
+ * Note: we do not do initialize_mergeclause_eclasses() here. It is
+ * caller's responsibility that left_ec/right_ec be set as necessary.
+ */
+RestrictInfo *
+build_implied_join_equality(PlannerInfo *root,
+ Oid opno,
+ Oid collation,
+ Expr *item1,
+ Expr *item2,
+ Relids qualscope,
+ Relids nullable_relids,
+ Index security_level)
+{
+ RestrictInfo *restrictinfo;
+ Expr *clause;
+
+ /*
+ * Build the new clause. Copy to ensure it shares no substructure with
+ * original (this is necessary in case there are subselects in there...)
+ */
+ clause = make_opclause(opno,
+ BOOLOID, /* opresulttype */
+ false, /* opretset */
+ copyObject(item1),
+ copyObject(item2),
+ InvalidOid,
+ collation);
+
+ /*
+ * Build the RestrictInfo node itself.
+ */
+ restrictinfo = make_restrictinfo(root,
+ clause,
+ true, /* is_pushed_down */
+ false, /* outerjoin_delayed */
+ false, /* pseudoconstant */
+ security_level, /* security_level */
+ qualscope, /* required_relids */
+ NULL, /* outer_relids */
+ nullable_relids); /* nullable_relids */
+
+ /* Set mergejoinability/hashjoinability flags */
+ check_mergejoinable(restrictinfo);
+ check_hashjoinable(restrictinfo);
+ check_memoizable(restrictinfo);
+
+ return restrictinfo;
+}
+
+
+/*
+ * match_foreign_keys_to_quals
+ * Match foreign-key constraints to equivalence classes and join quals
+ *
+ * The idea here is to see which query join conditions match equality
+ * constraints of a foreign-key relationship. For such join conditions,
+ * we can use the FK semantics to make selectivity estimates that are more
+ * reliable than estimating from statistics, especially for multiple-column
+ * FKs, where the normal assumption of independent conditions tends to fail.
+ *
+ * In this function we annotate the ForeignKeyOptInfos in root->fkey_list
+ * with info about which eclasses and join qual clauses they match, and
+ * discard any ForeignKeyOptInfos that are irrelevant for the query.
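+ *
+ * For example, given a two-column foreign key from orders(o_custkey,
+ * o_region) to customers(c_custkey, c_region), the join quals
+ * "o_custkey = c_custkey AND o_region = c_region" match the FK, so join
+ * selectivity can be estimated from the FK (roughly one match per
+ * referencing row) instead of by multiplying per-column selectivities.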
+ */
+void
+match_foreign_keys_to_quals(PlannerInfo *root)
+{
+ List *newlist = NIL;
+ ListCell *lc;
+
+ foreach(lc, root->fkey_list)
+ {
+ ForeignKeyOptInfo *fkinfo = (ForeignKeyOptInfo *) lfirst(lc);
+ RelOptInfo *con_rel;
+ RelOptInfo *ref_rel;
+ int colno;
+
+ /*
+ * Either relid might identify a rel that is in the query's rtable but
+ * isn't referenced by the jointree so won't have a RelOptInfo. Hence
+ * don't use find_base_rel() here. We can ignore such FKs.
+ */
+ if (fkinfo->con_relid >= root->simple_rel_array_size ||
+ fkinfo->ref_relid >= root->simple_rel_array_size)
+ continue; /* just paranoia */
+ con_rel = root->simple_rel_array[fkinfo->con_relid];
+ if (con_rel == NULL)
+ continue;
+ ref_rel = root->simple_rel_array[fkinfo->ref_relid];
+ if (ref_rel == NULL)
+ continue;
+
+ /*
+ * Ignore FK unless both rels are baserels. This gets rid of FKs that
+ * link to inheritance child rels (otherrels) and those that link to
+ * rels removed by join removal (dead rels).
+ */
+ if (con_rel->reloptkind != RELOPT_BASEREL ||
+ ref_rel->reloptkind != RELOPT_BASEREL)
+ continue;
+
+ /*
+ * Scan the columns and try to match them to eclasses and quals.
+ *
+ * Note: for simple inner joins, any match should be in an eclass.
+ * "Loose" quals that syntactically match an FK equality must have
+ * been rejected for EC status because they are outer-join quals or
+ * similar. We can still consider them to match the FK if they are
+ * not outerjoin_delayed.
+ */
+ for (colno = 0; colno < fkinfo->nkeys; colno++)
+ {
+ EquivalenceClass *ec;
+ AttrNumber con_attno,
+ ref_attno;
+ Oid fpeqop;
+ ListCell *lc2;
+
+ ec = match_eclasses_to_foreign_key_col(root, fkinfo, colno);
+ /* Don't bother looking for loose quals if we got an EC match */
+ if (ec != NULL)
+ {
+ fkinfo->nmatched_ec++;
+ if (ec->ec_has_const)
+ fkinfo->nconst_ec++;
+ continue;
+ }
+
+ /*
+ * Scan joininfo list for relevant clauses. Either rel's joininfo
+ * list would do equally well; we use con_rel's.
+ */
+ con_attno = fkinfo->conkey[colno];
+ ref_attno = fkinfo->confkey[colno];
+ fpeqop = InvalidOid; /* we'll look this up only if needed */
+
+ foreach(lc2, con_rel->joininfo)
+ {
+ RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc2);
+ OpExpr *clause = (OpExpr *) rinfo->clause;
+ Var *leftvar;
+ Var *rightvar;
+
+ /* Ignore outerjoin-delayed clauses */
+ if (rinfo->outerjoin_delayed)
+ continue;
+
+ /* Only binary OpExprs are useful for consideration */
+ if (!IsA(clause, OpExpr) ||
+ list_length(clause->args) != 2)
+ continue;
+ leftvar = (Var *) get_leftop((Expr *) clause);
+ rightvar = (Var *) get_rightop((Expr *) clause);
+
+ /* Operands must be Vars, possibly with RelabelType */
+ while (leftvar && IsA(leftvar, RelabelType))
+ leftvar = (Var *) ((RelabelType *) leftvar)->arg;
+ if (!(leftvar && IsA(leftvar, Var)))
+ continue;
+ while (rightvar && IsA(rightvar, RelabelType))
+ rightvar = (Var *) ((RelabelType *) rightvar)->arg;
+ if (!(rightvar && IsA(rightvar, Var)))
+ continue;
+
+ /* Now try to match the vars to the current foreign key cols */
+ if (fkinfo->ref_relid == leftvar->varno &&
+ ref_attno == leftvar->varattno &&
+ fkinfo->con_relid == rightvar->varno &&
+ con_attno == rightvar->varattno)
+ {
+ /* Vars match, but is it the right operator? */
+ if (clause->opno == fkinfo->conpfeqop[colno])
+ {
+ fkinfo->rinfos[colno] = lappend(fkinfo->rinfos[colno],
+ rinfo);
+ fkinfo->nmatched_ri++;
+ }
+ }
+ else if (fkinfo->ref_relid == rightvar->varno &&
+ ref_attno == rightvar->varattno &&
+ fkinfo->con_relid == leftvar->varno &&
+ con_attno == leftvar->varattno)
+ {
+ /*
+ * Reverse match, must check commutator operator. Look it
+ * up if we didn't already. (In the worst case we might
+ * do multiple lookups here, but that would require an FK
+ * equality operator without commutator, which is
+ * unlikely.)
+ */
+ if (!OidIsValid(fpeqop))
+ fpeqop = get_commutator(fkinfo->conpfeqop[colno]);
+ if (clause->opno == fpeqop)
+ {
+ fkinfo->rinfos[colno] = lappend(fkinfo->rinfos[colno],
+ rinfo);
+ fkinfo->nmatched_ri++;
+ }
+ }
+ }
+ /* If we found any matching loose quals, count col as matched */
+ if (fkinfo->rinfos[colno])
+ fkinfo->nmatched_rcols++;
+ }
+
+ /*
+ * Currently, we drop multicolumn FKs that aren't fully matched to the
+ * query. Later we might figure out how to derive some sort of
+ * estimate from them, in which case this test should be weakened to
+ * "if ((fkinfo->nmatched_ec + fkinfo->nmatched_rcols) > 0)".
+ */
+ if ((fkinfo->nmatched_ec + fkinfo->nmatched_rcols) == fkinfo->nkeys)
+ newlist = lappend(newlist, fkinfo);
+ }
+ /* Replace fkey_list, thereby discarding any useless entries */
+ root->fkey_list = newlist;
+}
+
+
+/*****************************************************************************
+ *
+ * CHECKS FOR MERGEJOINABLE AND HASHJOINABLE CLAUSES
+ *
+ *****************************************************************************/
+
+/*
+ * check_mergejoinable
+ * If the restrictinfo's clause is mergejoinable, set the mergejoin
+ * info fields in the restrictinfo.
+ *
+ * Currently, we support mergejoin for binary opclauses where
+ * the operator is a mergejoinable operator. The arguments can be
+ * anything --- as long as there are no volatile functions in them.
+ */
+static void
+check_mergejoinable(RestrictInfo *restrictinfo)
+{
+ Expr *clause = restrictinfo->clause;
+ Oid opno;
+ Node *leftarg;
+
+ if (restrictinfo->pseudoconstant)
+ return;
+ if (!is_opclause(clause))
+ return;
+ if (list_length(((OpExpr *) clause)->args) != 2)
+ return;
+
+ opno = ((OpExpr *) clause)->opno;
+ leftarg = linitial(((OpExpr *) clause)->args);
+
+ if (op_mergejoinable(opno, exprType(leftarg)) &&
+ !contain_volatile_functions((Node *) restrictinfo))
+ restrictinfo->mergeopfamilies = get_mergejoin_opfamilies(opno);
+
+ /*
+ * Note: op_mergejoinable is just a hint; if we fail to find the operator
+ * in any btree opfamilies, mergeopfamilies remains NIL and so the clause
+ * is not treated as mergejoinable.
+ */
+}
+
+/*
+ * check_hashjoinable
+ * If the restrictinfo's clause is hashjoinable, set the hashjoin
+ * info fields in the restrictinfo.
+ *
+ * Currently, we support hashjoin for binary opclauses where
+ * the operator is a hashjoinable operator. The arguments can be
+ * anything --- as long as there are no volatile functions in them.
+ */
+static void
+check_hashjoinable(RestrictInfo *restrictinfo)
+{
+ Expr *clause = restrictinfo->clause;
+ Oid opno;
+ Node *leftarg;
+
+ if (restrictinfo->pseudoconstant)
+ return;
+ if (!is_opclause(clause))
+ return;
+ if (list_length(((OpExpr *) clause)->args) != 2)
+ return;
+
+ opno = ((OpExpr *) clause)->opno;
+ leftarg = linitial(((OpExpr *) clause)->args);
+
+ if (op_hashjoinable(opno, exprType(leftarg)) &&
+ !contain_volatile_functions((Node *) restrictinfo))
+ restrictinfo->hashjoinoperator = opno;
+}
+
+/*
+ * check_memoizable
+ * If the restrictinfo's clause is suitable to be used for a Memoize node,
+ * set left_hasheqoperator and right_hasheqoperator to the hash equality
+ * operators that will be needed during caching.
+ */
+static void
+check_memoizable(RestrictInfo *restrictinfo)
+{
+ TypeCacheEntry *typentry;
+ Expr *clause = restrictinfo->clause;
+ Oid lefttype;
+ Oid righttype;
+
+ if (restrictinfo->pseudoconstant)
+ return;
+ if (!is_opclause(clause))
+ return;
+ if (list_length(((OpExpr *) clause)->args) != 2)
+ return;
+
+ lefttype = exprType(linitial(((OpExpr *) clause)->args));
+
+ typentry = lookup_type_cache(lefttype, TYPECACHE_HASH_PROC |
+ TYPECACHE_EQ_OPR);
+
+ if (OidIsValid(typentry->hash_proc) && OidIsValid(typentry->eq_opr))
+ restrictinfo->left_hasheqoperator = typentry->eq_opr;
+
+ righttype = exprType(lsecond(((OpExpr *) clause)->args));
+
+ /*
+ * Look up the right-hand type, unless it's the same as the left type, in which
+ * case typentry is already pointing to the required TypeCacheEntry.
+ */
+ if (lefttype != righttype)
+ typentry = lookup_type_cache(righttype, TYPECACHE_HASH_PROC |
+ TYPECACHE_EQ_OPR);
+
+ if (OidIsValid(typentry->hash_proc) && OidIsValid(typentry->eq_opr))
+ restrictinfo->right_hasheqoperator = typentry->eq_opr;
+}
diff --git a/src/backend/optimizer/plan/planagg.c b/src/backend/optimizer/plan/planagg.c
new file mode 100644
index 0000000..9330908
--- /dev/null
+++ b/src/backend/optimizer/plan/planagg.c
@@ -0,0 +1,513 @@
+/*-------------------------------------------------------------------------
+ *
+ * planagg.c
+ * Special planning for aggregate queries.
+ *
+ * This module tries to replace MIN/MAX aggregate functions by subqueries
+ * of the form
+ * (SELECT col FROM tab
+ * WHERE col IS NOT NULL AND existing-quals
+ * ORDER BY col ASC/DESC
+ * LIMIT 1)
+ * Given a suitable index on tab.col, this can be much faster than the
+ * generic scan-all-the-rows aggregation plan. We can handle multiple
+ * MIN/MAX aggregates by generating multiple subqueries, and their
+ * orderings can be different. However, if the query contains any
+ * non-optimizable aggregates, there's no point since we'll have to
+ * scan all the rows anyway.
+ *
+ *
+ * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ * src/backend/optimizer/plan/planagg.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "access/htup_details.h"
+#include "catalog/pg_aggregate.h"
+#include "catalog/pg_type.h"
+#include "nodes/makefuncs.h"
+#include "nodes/nodeFuncs.h"
+#include "optimizer/clauses.h"
+#include "optimizer/cost.h"
+#include "optimizer/optimizer.h"
+#include "optimizer/pathnode.h"
+#include "optimizer/paths.h"
+#include "optimizer/planmain.h"
+#include "optimizer/subselect.h"
+#include "optimizer/tlist.h"
+#include "parser/parse_clause.h"
+#include "parser/parsetree.h"
+#include "rewrite/rewriteManip.h"
+#include "utils/lsyscache.h"
+#include "utils/syscache.h"
+
+static bool can_minmax_aggs(PlannerInfo *root, List **context);
+static bool build_minmax_path(PlannerInfo *root, MinMaxAggInfo *mminfo,
+ Oid eqop, Oid sortop, bool nulls_first);
+static void minmax_qp_callback(PlannerInfo *root, void *extra);
+static Oid fetch_agg_sort_op(Oid aggfnoid);
+
+
+/*
+ * preprocess_minmax_aggregates - preprocess MIN/MAX aggregates
+ *
+ * Check to see whether the query contains MIN/MAX aggregate functions that
+ * might be optimizable via indexscans. If it does, and all the aggregates
+ * are potentially optimizable, then create a MinMaxAggPath and add it to
+ * the (UPPERREL_GROUP_AGG, NULL) upperrel.
+ *
+ * This should be called by grouping_planner() just before it's ready to call
+ * query_planner(), because we generate indexscan paths by cloning the
+ * planner's state and invoking query_planner() on a modified version of
+ * the query parsetree. Thus, all preprocessing needed before query_planner()
+ * must already be done. This relies on the list of aggregates in
+ * root->agginfos, so preprocess_aggrefs() must have been called already, too.
+ */
+void
+preprocess_minmax_aggregates(PlannerInfo *root)
+{
+ Query *parse = root->parse;
+ FromExpr *jtnode;
+ RangeTblRef *rtr;
+ RangeTblEntry *rte;
+ List *aggs_list;
+ RelOptInfo *grouped_rel;
+ ListCell *lc;
+
+ /* minmax_aggs list should be empty at this point */
+ Assert(root->minmax_aggs == NIL);
+
+ /* Nothing to do if query has no aggregates */
+ if (!parse->hasAggs)
+ return;
+
+ Assert(!parse->setOperations); /* shouldn't get here if a setop */
+ Assert(parse->rowMarks == NIL); /* nor if FOR UPDATE */
+
+ /*
+ * Reject unoptimizable cases.
+ *
+ * We don't handle GROUP BY or windowing, because our current
+ * implementations of grouping require looking at all the rows anyway, and
+ * so there's not much point in optimizing MIN/MAX.
+ */
+ if (parse->groupClause || list_length(parse->groupingSets) > 1 ||
+ parse->hasWindowFuncs)
+ return;
+
+ /*
+ * Reject if query contains any CTEs; there's no way to build an indexscan
+ * on one so we couldn't succeed here. (If the CTEs are unreferenced,
+ * that's not true, but it doesn't seem worth expending cycles to check.)
+ */
+ if (parse->cteList)
+ return;
+
+ /*
+ * We also restrict the query to reference exactly one table, since join
+ * conditions can't be handled reasonably. (We could perhaps handle a
+ * query containing cartesian-product joins, but it hardly seems worth the
+ * trouble.) However, the single table could be buried in several levels
+ * of FromExpr due to subqueries. Note the "single" table could be an
+ * inheritance parent, too, including the case of a UNION ALL subquery
+ * that's been flattened to an appendrel.
+ */
+ jtnode = parse->jointree;
+ while (IsA(jtnode, FromExpr))
+ {
+ if (list_length(jtnode->fromlist) != 1)
+ return;
+ jtnode = linitial(jtnode->fromlist);
+ }
+ if (!IsA(jtnode, RangeTblRef))
+ return;
+ rtr = (RangeTblRef *) jtnode;
+ rte = planner_rt_fetch(rtr->rtindex, root);
+ if (rte->rtekind == RTE_RELATION)
+ /* ordinary relation, ok */ ;
+ else if (rte->rtekind == RTE_SUBQUERY && rte->inh)
+ /* flattened UNION ALL subquery, ok */ ;
+ else
+ return;
+
+ /*
+ * Scan the tlist and HAVING qual to find all the aggregates and verify
+ * all are MIN/MAX aggregates. Stop as soon as we find one that isn't.
+ */
+ aggs_list = NIL;
+ if (!can_minmax_aggs(root, &aggs_list))
+ return;
+
+ /*
+ * OK, there is at least the possibility of performing the optimization.
+ * Build an access path for each aggregate. If any of the aggregates
+ * prove to be non-indexable, give up; there is no point in optimizing
+ * just some of them.
+ */
+ foreach(lc, aggs_list)
+ {
+ MinMaxAggInfo *mminfo = (MinMaxAggInfo *) lfirst(lc);
+ Oid eqop;
+ bool reverse;
+
+ /*
+ * We'll need the equality operator that goes with the aggregate's
+ * ordering operator.
+ */
+ eqop = get_equality_op_for_ordering_op(mminfo->aggsortop, &reverse);
+ if (!OidIsValid(eqop)) /* shouldn't happen */
+ elog(ERROR, "could not find equality operator for ordering operator %u",
+ mminfo->aggsortop);
+
+ /*
+ * We can use either an ordering that gives NULLS FIRST or one that
+ * gives NULLS LAST; furthermore there's unlikely to be much
+ * performance difference between them, so it doesn't seem worth
+ * costing out both ways if we get a hit on the first one. NULLS
+ * FIRST is more likely to be available if the operator is a
+ * reverse-sort operator, so try that first if reverse.
+ */
+ if (build_minmax_path(root, mminfo, eqop, mminfo->aggsortop, reverse))
+ continue;
+ if (build_minmax_path(root, mminfo, eqop, mminfo->aggsortop, !reverse))
+ continue;
+
+ /* No indexable path for this aggregate, so fail */
+ return;
+ }
+
+ /*
+ * OK, we can do the query this way. Prepare to create a MinMaxAggPath
+ * node.
+ *
+ * First, create an output Param node for each agg. (If we end up not
+ * using the MinMaxAggPath, we'll waste a PARAM_EXEC slot for each agg,
+ * which is not worth worrying about. We can't wait till create_plan time
+ * to decide whether to make the Param, unfortunately.)
+ */
+ foreach(lc, aggs_list)
+ {
+ MinMaxAggInfo *mminfo = (MinMaxAggInfo *) lfirst(lc);
+
+ mminfo->param =
+ SS_make_initplan_output_param(root,
+ exprType((Node *) mminfo->target),
+ -1,
+ exprCollation((Node *) mminfo->target));
+ }
+
+ /*
+ * Create a MinMaxAggPath node with the appropriate estimated costs and
+ * other needed data, and add it to the UPPERREL_GROUP_AGG upperrel, where
+ * it will compete against the standard aggregate implementation. (It
+ * will likely always win, but we need not assume that here.)
+ *
+ * Note: grouping_planner won't have created this upperrel yet, but it's
+ * fine for us to create it first. We will not have inserted the correct
+ * consider_parallel value in it, but MinMaxAggPath paths are currently
+ * never parallel-safe anyway, so that doesn't matter. Likewise, it
+ * doesn't matter that we haven't filled FDW-related fields in the rel.
+ * Also, because there are no rowmarks, we know that the processed_tlist
+ * doesn't need to change anymore, so making the pathtarget now is safe.
+ */
+ grouped_rel = fetch_upper_rel(root, UPPERREL_GROUP_AGG, NULL);
+ add_path(grouped_rel, (Path *)
+ create_minmaxagg_path(root, grouped_rel,
+ create_pathtarget(root,
+ root->processed_tlist),
+ aggs_list,
+ (List *) parse->havingQual));
+}
+
+/*
+ * can_minmax_aggs
+ * Walk through all the aggregates in the query, and check
+ * if they are all MIN/MAX aggregates. If so, build a list of the
+ * distinct aggregate calls in the tree.
+ *
+ * Returns false if a non-MIN/MAX aggregate is found, true otherwise.
+ *
+ * This does not descend into subqueries, and so should be used only after
+ * reduction of sublinks to subplans. There mustn't be outer-aggregate
+ * references either.
+ */
+static bool
+can_minmax_aggs(PlannerInfo *root, List **context)
+{
+ ListCell *lc;
+
+ foreach(lc, root->agginfos)
+ {
+ AggInfo *agginfo = (AggInfo *) lfirst(lc);
+ Aggref *aggref = agginfo->representative_aggref;
+ Oid aggsortop;
+ TargetEntry *curTarget;
+ MinMaxAggInfo *mminfo;
+
+ Assert(aggref->agglevelsup == 0);
+ if (list_length(aggref->args) != 1)
+ return false; /* it couldn't be MIN/MAX */
+
+ /*
+ * ORDER BY is usually irrelevant for MIN/MAX, but it can change the
+ * outcome if the aggsortop's operator class recognizes non-identical
+ * values as equal. For example, 4.0 and 4.00 are equal according to
+ * numeric_ops, yet distinguishable. If MIN() receives more than one
+ * value equal to 4.0 and no value less than 4.0, it is unspecified
+ * which of those equal values MIN() returns. An ORDER BY expression
+ * that differs for each of those equal values of the argument
+ * expression makes the result predictable once again. This is a
+ * niche requirement, and we do not implement it with subquery paths.
+ * In any case, this test lets us reject ordered-set aggregates
+ * quickly.
+ */
+ if (aggref->aggorder != NIL)
+ return false;
+ /* note: we do not care if DISTINCT is mentioned ... */
+
+ /*
+ * We might implement the optimization when a FILTER clause is present
+ * by adding the filter to the quals of the generated subquery. For
+ * now, just punt.
+ */
+ if (aggref->aggfilter != NULL)
+ return false;
+
+ aggsortop = fetch_agg_sort_op(aggref->aggfnoid);
+ if (!OidIsValid(aggsortop))
+ return false; /* not a MIN/MAX aggregate */
+
+ curTarget = (TargetEntry *) linitial(aggref->args);
+
+ if (contain_mutable_functions((Node *) curTarget->expr))
+ return false; /* not potentially indexable */
+
+ if (type_is_rowtype(exprType((Node *) curTarget->expr)))
+ return false; /* IS NOT NULL would have weird semantics */
+
+ mminfo = makeNode(MinMaxAggInfo);
+ mminfo->aggfnoid = aggref->aggfnoid;
+ mminfo->aggsortop = aggsortop;
+ mminfo->target = curTarget->expr;
+ mminfo->subroot = NULL; /* don't compute path yet */
+ mminfo->path = NULL;
+ mminfo->pathcost = 0;
+ mminfo->param = NULL;
+
+ *context = lappend(*context, mminfo);
+ }
+ return true;
+}
+
+/*
+ * build_minmax_path
+ * Given a MIN/MAX aggregate, try to build an indexscan Path it can be
+ * optimized with.
+ *
+ * If successful, stash the best path in *mminfo and return true.
+ * Otherwise, return false.
+ */
+static bool
+build_minmax_path(PlannerInfo *root, MinMaxAggInfo *mminfo,
+ Oid eqop, Oid sortop, bool nulls_first)
+{
+ PlannerInfo *subroot;
+ Query *parse;
+ TargetEntry *tle;
+ List *tlist;
+ NullTest *ntest;
+ SortGroupClause *sortcl;
+ RelOptInfo *final_rel;
+ Path *sorted_path;
+ Cost path_cost;
+ double path_fraction;
+
+ /*
+ * We are going to construct what is effectively a sub-SELECT query, so
+ * clone the current query level's state and adjust it to make it look
+ * like a subquery. Any outer references will now be one level higher
+ * than before. (This means that when we are done, there will be no Vars
+ * of level 1, which is why the subquery can become an initplan.)
+ */
+ subroot = (PlannerInfo *) palloc(sizeof(PlannerInfo));
+ memcpy(subroot, root, sizeof(PlannerInfo));
+ subroot->query_level++;
+ subroot->parent_root = root;
+ /* reset subplan-related stuff */
+ subroot->plan_params = NIL;
+ subroot->outer_params = NULL;
+ subroot->init_plans = NIL;
+ subroot->agginfos = NIL;
+ subroot->aggtransinfos = NIL;
+
+ subroot->parse = parse = copyObject(root->parse);
+ IncrementVarSublevelsUp((Node *) parse, 1, 1);
+
+ /* append_rel_list might contain outer Vars? */
+ subroot->append_rel_list = copyObject(root->append_rel_list);
+ IncrementVarSublevelsUp((Node *) subroot->append_rel_list, 1, 1);
+ /* There shouldn't be any OJ info to translate, as yet */
+ Assert(subroot->join_info_list == NIL);
+ /* and we haven't made equivalence classes, either */
+ Assert(subroot->eq_classes == NIL);
+ /* and we haven't created PlaceHolderInfos, either */
+ Assert(subroot->placeholder_list == NIL);
+
+ /*----------
+ * Generate modified query of the form
+ * (SELECT col FROM tab
+ * WHERE col IS NOT NULL AND existing-quals
+ * ORDER BY col ASC/DESC
+ * LIMIT 1)
+ *----------
+ */
+ /* single tlist entry that is the aggregate target */
+ tle = makeTargetEntry(copyObject(mminfo->target),
+ (AttrNumber) 1,
+ pstrdup("agg_target"),
+ false);
+ tlist = list_make1(tle);
+ subroot->processed_tlist = parse->targetList = tlist;
+
+ /* No HAVING, no DISTINCT, no aggregates anymore */
+ parse->havingQual = NULL;
+ subroot->hasHavingQual = false;
+ parse->distinctClause = NIL;
+ parse->hasDistinctOn = false;
+ parse->hasAggs = false;
+
+ /* Build "target IS NOT NULL" expression */
+ ntest = makeNode(NullTest);
+ ntest->nulltesttype = IS_NOT_NULL;
+ ntest->arg = copyObject(mminfo->target);
+ /* we checked it wasn't a rowtype in can_minmax_aggs() */
+ ntest->argisrow = false;
+ ntest->location = -1;
+
+ /* User might have had that in WHERE already */
+ if (!list_member((List *) parse->jointree->quals, ntest))
+ parse->jointree->quals = (Node *)
+ lcons(ntest, (List *) parse->jointree->quals);
+
+ /* Build suitable ORDER BY clause */
+ sortcl = makeNode(SortGroupClause);
+ sortcl->tleSortGroupRef = assignSortGroupRef(tle, subroot->processed_tlist);
+ sortcl->eqop = eqop;
+ sortcl->sortop = sortop;
+ sortcl->nulls_first = nulls_first;
+ sortcl->hashable = false; /* no need to make this accurate */
+ parse->sortClause = list_make1(sortcl);
+
+ /* set up expressions for LIMIT 1 */
+ parse->limitOffset = NULL;
+ parse->limitCount = (Node *) makeConst(INT8OID, -1, InvalidOid,
+ sizeof(int64),
+ Int64GetDatum(1), false,
+ FLOAT8PASSBYVAL);
+
+ /*
+ * Generate the best paths for this query, telling query_planner that we
+ * have LIMIT 1.
+ */
+ subroot->tuple_fraction = 1.0;
+ subroot->limit_tuples = 1.0;
+
+ final_rel = query_planner(subroot, minmax_qp_callback, NULL);
+
+ /*
+ * Since we didn't go through subquery_planner() to handle the subquery,
+ * we have to do some of the same cleanup it would do, in particular cope
+ * with params and initplans used within this subquery. (This won't
+ * matter if we end up not using the subplan.)
+ */
+ SS_identify_outer_params(subroot);
+ SS_charge_for_initplans(subroot, final_rel);
+
+ /*
+ * Get the best presorted path, that being the one that's cheapest for
+ * fetching just one row. If there's no such path, fail.
+ */
+ if (final_rel->rows > 1.0)
+ path_fraction = 1.0 / final_rel->rows;
+ else
+ path_fraction = 1.0;
+
+ sorted_path =
+ get_cheapest_fractional_path_for_pathkeys(final_rel->pathlist,
+ subroot->query_pathkeys,
+ NULL,
+ path_fraction);
+ if (!sorted_path)
+ return false;
+
+ /*
+ * The path might not return exactly what we want, so fix that. (We
+ * assume that this won't change any conclusions about which was the
+ * cheapest path.)
+ */
+ sorted_path = apply_projection_to_path(subroot, final_rel, sorted_path,
+ create_pathtarget(subroot,
+ subroot->processed_tlist));
+
+ /*
+ * Determine cost to get just the first row of the presorted path.
+ *
+ * Note: cost calculation here should match
+ * compare_fractional_path_costs().
+ */
+ path_cost = sorted_path->startup_cost +
+ path_fraction * (sorted_path->total_cost - sorted_path->startup_cost);
+
+ /* Save state for further processing */
+ mminfo->subroot = subroot;
+ mminfo->path = sorted_path;
+ mminfo->pathcost = path_cost;
+
+ return true;
+}
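As a rough illustration of the one-row cost interpolation used just above, the short standalone sketch below (not part of planagg.c; all cost and row figures are assumed) computes the same startup-to-total interpolation described for compare_fractional_path_costs():

#include <stdio.h>

int
main(void)
{
	/* hypothetical figures for a presorted path over a 10000-row rel */
	double		startup_cost = 0.42;
	double		total_cost = 1042.0;
	double		rows = 10000.0;

	/* fetching one row out of N rows corresponds to fraction 1/N */
	double		path_fraction = (rows > 1.0) ? 1.0 / rows : 1.0;

	/* interpolate between startup and total cost, as done above */
	double		path_cost = startup_cost +
		path_fraction * (total_cost - startup_cost);

	printf("estimated cost to fetch the first row: %g\n", path_cost);
	return 0;
}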
+
+/*
+ * Compute query_pathkeys and other pathkeys during query_planner()
+ */
+static void
+minmax_qp_callback(PlannerInfo *root, void *extra)
+{
+ root->group_pathkeys = NIL;
+ root->window_pathkeys = NIL;
+ root->distinct_pathkeys = NIL;
+
+ root->sort_pathkeys =
+ make_pathkeys_for_sortclauses(root,
+ root->parse->sortClause,
+ root->parse->targetList);
+
+ root->query_pathkeys = root->sort_pathkeys;
+}
+
+/*
+ * Get the OID of the sort operator, if any, associated with an aggregate.
+ * Returns InvalidOid if there is no such operator.
+ */
+static Oid
+fetch_agg_sort_op(Oid aggfnoid)
+{
+ HeapTuple aggTuple;
+ Form_pg_aggregate aggform;
+ Oid aggsortop;
+
+ /* fetch aggregate entry from pg_aggregate */
+ aggTuple = SearchSysCache1(AGGFNOID, ObjectIdGetDatum(aggfnoid));
+ if (!HeapTupleIsValid(aggTuple))
+ return InvalidOid;
+ aggform = (Form_pg_aggregate) GETSTRUCT(aggTuple);
+ aggsortop = aggform->aggsortop;
+ ReleaseSysCache(aggTuple);
+
+ return aggsortop;
+}
diff --git a/src/backend/optimizer/plan/planmain.c b/src/backend/optimizer/plan/planmain.c
new file mode 100644
index 0000000..5a1d006
--- /dev/null
+++ b/src/backend/optimizer/plan/planmain.c
@@ -0,0 +1,284 @@
+/*-------------------------------------------------------------------------
+ *
+ * planmain.c
+ * Routines to plan a single query
+ *
+ * What's in a name, anyway? The top-level entry point of the planner/
+ * optimizer is over in planner.c, not here as you might think from the
+ * file name. But this is the main code for planning a basic join operation,
+ * shorn of features like subselects, inheritance, aggregates, grouping,
+ * and so on. (Those are the things planner.c deals with.)
+ *
+ * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ * src/backend/optimizer/plan/planmain.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "optimizer/appendinfo.h"
+#include "optimizer/clauses.h"
+#include "optimizer/inherit.h"
+#include "optimizer/optimizer.h"
+#include "optimizer/orclauses.h"
+#include "optimizer/pathnode.h"
+#include "optimizer/paths.h"
+#include "optimizer/placeholder.h"
+#include "optimizer/planmain.h"
+
+
+/*
+ * query_planner
+ * Generate a path (that is, a simplified plan) for a basic query,
+ * which may involve joins but not any fancier features.
+ *
+ * Since query_planner does not handle the toplevel processing (grouping,
+ * sorting, etc) it cannot select the best path by itself. Instead, it
+ * returns the RelOptInfo for the top level of joining, and the caller
+ * (grouping_planner) can choose among the surviving paths for the rel.
+ *
+ * root describes the query to plan
+ * qp_callback is a function to compute query_pathkeys once it's safe to do so
+ * qp_extra is optional extra data to pass to qp_callback
+ *
+ * Note: the PlannerInfo node also includes a query_pathkeys field, which
+ * tells query_planner the sort order that is desired in the final output
+ * plan. This value is *not* available at call time, but is computed by
+ * qp_callback once we have completed merging the query's equivalence classes.
+ * (We cannot construct canonical pathkeys until that's done.)
+ */
+RelOptInfo *
+query_planner(PlannerInfo *root,
+ query_pathkeys_callback qp_callback, void *qp_extra)
+{
+ Query *parse = root->parse;
+ List *joinlist;
+ RelOptInfo *final_rel;
+
+ /*
+ * Init planner lists to empty.
+ *
+ * NOTE: append_rel_list was set up by subquery_planner, so do not touch
+ * here.
+ */
+ root->join_rel_list = NIL;
+ root->join_rel_hash = NULL;
+ root->join_rel_level = NULL;
+ root->join_cur_level = 0;
+ root->canon_pathkeys = NIL;
+ root->left_join_clauses = NIL;
+ root->right_join_clauses = NIL;
+ root->full_join_clauses = NIL;
+ root->join_info_list = NIL;
+ root->placeholder_list = NIL;
+ root->fkey_list = NIL;
+ root->initial_rels = NIL;
+
+ /*
+ * Set up arrays for accessing base relations and AppendRelInfos.
+ */
+ setup_simple_rel_arrays(root);
+
+ /*
+ * In the trivial case where the jointree is a single RTE_RESULT relation,
+ * bypass all the rest of this function and just make a RelOptInfo and its
+ * one access path. This is worth optimizing because it applies for
+ * common cases like "SELECT expression" and "INSERT ... VALUES()".
+ */
+ Assert(parse->jointree->fromlist != NIL);
+ if (list_length(parse->jointree->fromlist) == 1)
+ {
+ Node *jtnode = (Node *) linitial(parse->jointree->fromlist);
+
+ if (IsA(jtnode, RangeTblRef))
+ {
+ int varno = ((RangeTblRef *) jtnode)->rtindex;
+ RangeTblEntry *rte = root->simple_rte_array[varno];
+
+ Assert(rte != NULL);
+ if (rte->rtekind == RTE_RESULT)
+ {
+ /* Make the RelOptInfo for it directly */
+ final_rel = build_simple_rel(root, varno, NULL);
+
+ /*
+ * If query allows parallelism in general, check whether the
+ * quals are parallel-restricted. (We need not check
+ * final_rel->reltarget because it's empty at this point.
+ * Anything parallel-restricted in the query tlist will be
+ * dealt with later.) This is normally pretty silly, because
+ * a Result-only plan would never be interesting to
+ * parallelize. However, if force_parallel_mode is on, then
+ * we want to execute the Result in a parallel worker if
+ * possible, so we must do this.
+ */
+ if (root->glob->parallelModeOK &&
+ force_parallel_mode != FORCE_PARALLEL_OFF)
+ final_rel->consider_parallel =
+ is_parallel_safe(root, parse->jointree->quals);
+
+ /*
+ * The only path for it is a trivial Result path. We cheat a
+ * bit here by using a GroupResultPath, because that way we
+ * can just jam the quals into it without preprocessing them.
+ * (But, if you hold your head at the right angle, a FROM-less
+ * SELECT is a kind of degenerate-grouping case, so it's not
+ * that much of a cheat.)
+ */
+ add_path(final_rel, (Path *)
+ create_group_result_path(root, final_rel,
+ final_rel->reltarget,
+ (List *) parse->jointree->quals));
+
+ /* Select cheapest path (pretty easy in this case...) */
+ set_cheapest(final_rel);
+
+ /*
+ * We don't need to run generate_base_implied_equalities, but
+ * we do need to pretend that EC merging is complete.
+ */
+ root->ec_merging_done = true;
+
+ /*
+ * We still are required to call qp_callback, in case it's
+ * something like "SELECT 2+2 ORDER BY 1".
+ */
+ (*qp_callback) (root, qp_extra);
+
+ return final_rel;
+ }
+ }
+ }
+
+ /*
+ * Construct RelOptInfo nodes for all base relations used in the query.
+ * Appendrel member relations ("other rels") will be added later.
+ *
+ * Note: the reason we find the baserels by searching the jointree, rather
+ * than scanning the rangetable, is that the rangetable may contain RTEs
+ * for rels not actively part of the query, for example views. We don't
+ * want to make RelOptInfos for them.
+ */
+ add_base_rels_to_query(root, (Node *) parse->jointree);
+
+ /*
+ * Examine the targetlist and join tree, adding entries to baserel
+ * targetlists for all referenced Vars, and generating PlaceHolderInfo
+ * entries for all referenced PlaceHolderVars. Restrict and join clauses
+ * are added to appropriate lists belonging to the mentioned relations. We
+ * also build EquivalenceClasses for provably equivalent expressions. The
+ * SpecialJoinInfo list is also built to hold information about join order
+ * restrictions. Finally, we form a target joinlist for make_one_rel() to
+ * work from.
+ */
+ build_base_rel_tlists(root, root->processed_tlist);
+
+ find_placeholders_in_jointree(root);
+
+ find_lateral_references(root);
+
+ joinlist = deconstruct_jointree(root);
+
+ /*
+ * Reconsider any postponed outer-join quals now that we have built up
+ * equivalence classes. (This could result in further additions or
+ * mergings of classes.)
+ */
+ reconsider_outer_join_clauses(root);
+
+ /*
+ * If we formed any equivalence classes, generate additional restriction
+ * clauses as appropriate. (Implied join clauses are formed on-the-fly
+ * later.)
+ */
+ generate_base_implied_equalities(root);
+
+ /*
+ * We have completed merging equivalence sets, so it's now possible to
+ * generate pathkeys in canonical form; so compute query_pathkeys and
+ * other pathkeys fields in PlannerInfo.
+ */
+ (*qp_callback) (root, qp_extra);
+
+ /*
+ * Examine any "placeholder" expressions generated during subquery pullup.
+ * Make sure that the Vars they need are marked as needed at the relevant
+ * join level. This must be done before join removal because it might
+ * cause Vars or placeholders to be needed above a join when they weren't
+ * so marked before.
+ */
+ fix_placeholder_input_needed_levels(root);
+
+ /*
+ * Remove any useless outer joins. Ideally this would be done during
+ * jointree preprocessing, but the necessary information isn't available
+ * until we've built baserel data structures and classified qual clauses.
+ */
+ joinlist = remove_useless_joins(root, joinlist);
+
+ /*
+ * Also, reduce any semijoins with unique inner rels to plain inner joins.
+ * Likewise, this can't be done until now for lack of needed info.
+ */
+ reduce_unique_semijoins(root);
+
+ /*
+ * Now distribute "placeholders" to base rels as needed. This has to be
+ * done after join removal because removal could change whether a
+ * placeholder is evaluable at a base rel.
+ */
+ add_placeholders_to_base_rels(root);
+
+ /*
+ * Construct the lateral reference sets now that we have finalized
+ * PlaceHolderVar eval levels.
+ */
+ create_lateral_join_info(root);
+
+ /*
+ * Match foreign keys to equivalence classes and join quals. This must be
+ * done after finalizing equivalence classes, and it's useful to wait till
+ * after join removal so that we can skip processing foreign keys
+ * involving removed relations.
+ */
+ match_foreign_keys_to_quals(root);
+
+ /*
+ * Look for join OR clauses that we can extract single-relation
+ * restriction OR clauses from.
+ */
+ extract_restriction_or_clauses(root);
+
+ /*
+ * Now expand appendrels by adding "otherrels" for their children. We
+ * delay this to the end so that we have as much information as possible
+ * available for each baserel, including all restriction clauses. That
+ * lets us prune away partitions that don't satisfy a restriction clause.
+ * Also note that some information such as lateral_relids is propagated
+ * from baserels to otherrels here, so we must have computed it already.
+ */
+ add_other_rels_to_query(root);
+
+ /*
+ * Distribute any UPDATE/DELETE/MERGE row identity variables to the target
+ * relations. This can't be done till we've finished expansion of
+ * appendrels.
+ */
+ distribute_row_identity_vars(root);
+
+ /*
+ * Ready to do the primary planning.
+ */
+ final_rel = make_one_rel(root, joinlist);
+
+ /* Check that we got at least one usable path */
+ if (!final_rel || !final_rel->cheapest_total_path ||
+ final_rel->cheapest_total_path->param_info != NULL)
+ elog(ERROR, "failed to construct the join relation");
+
+ return final_rel;
+}
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
new file mode 100644
index 0000000..bd4e4ce
--- /dev/null
+++ b/src/backend/optimizer/plan/planner.c
@@ -0,0 +1,7492 @@
+/*-------------------------------------------------------------------------
+ *
+ * planner.c
+ * The query optimizer external interface.
+ *
+ * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ * src/backend/optimizer/plan/planner.c
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include <limits.h>
+#include <math.h>
+
+#include "access/genam.h"
+#include "access/htup_details.h"
+#include "access/parallel.h"
+#include "access/sysattr.h"
+#include "access/table.h"
+#include "access/xact.h"
+#include "catalog/pg_constraint.h"
+#include "catalog/pg_inherits.h"
+#include "catalog/pg_proc.h"
+#include "catalog/pg_type.h"
+#include "executor/executor.h"
+#include "executor/nodeAgg.h"
+#include "foreign/fdwapi.h"
+#include "jit/jit.h"
+#include "lib/bipartite_match.h"
+#include "lib/knapsack.h"
+#include "miscadmin.h"
+#include "nodes/makefuncs.h"
+#include "nodes/nodeFuncs.h"
+#ifdef OPTIMIZER_DEBUG
+#include "nodes/print.h"
+#endif
+#include "optimizer/appendinfo.h"
+#include "optimizer/clauses.h"
+#include "optimizer/cost.h"
+#include "optimizer/inherit.h"
+#include "optimizer/optimizer.h"
+#include "optimizer/paramassign.h"
+#include "optimizer/pathnode.h"
+#include "optimizer/paths.h"
+#include "optimizer/plancat.h"
+#include "optimizer/planmain.h"
+#include "optimizer/planner.h"
+#include "optimizer/prep.h"
+#include "optimizer/subselect.h"
+#include "optimizer/tlist.h"
+#include "parser/analyze.h"
+#include "parser/parse_agg.h"
+#include "parser/parsetree.h"
+#include "partitioning/partdesc.h"
+#include "rewrite/rewriteManip.h"
+#include "storage/dsm_impl.h"
+#include "utils/lsyscache.h"
+#include "utils/rel.h"
+#include "utils/selfuncs.h"
+#include "utils/syscache.h"
+
+/* GUC parameters */
+double cursor_tuple_fraction = DEFAULT_CURSOR_TUPLE_FRACTION;
+int force_parallel_mode = FORCE_PARALLEL_OFF;
+bool parallel_leader_participation = true;
+
+/* Hook for plugins to get control in planner() */
+planner_hook_type planner_hook = NULL;
+
+/* Hook for plugins to get control when grouping_planner() plans upper rels */
+create_upper_paths_hook_type create_upper_paths_hook = NULL;
+
+
+/* Expression kind codes for preprocess_expression */
+#define EXPRKIND_QUAL 0
+#define EXPRKIND_TARGET 1
+#define EXPRKIND_RTFUNC 2
+#define EXPRKIND_RTFUNC_LATERAL 3
+#define EXPRKIND_VALUES 4
+#define EXPRKIND_VALUES_LATERAL 5
+#define EXPRKIND_LIMIT 6
+#define EXPRKIND_APPINFO 7
+#define EXPRKIND_PHV 8
+#define EXPRKIND_TABLESAMPLE 9
+#define EXPRKIND_ARBITER_ELEM 10
+#define EXPRKIND_TABLEFUNC 11
+#define EXPRKIND_TABLEFUNC_LATERAL 12
+
+/* Passthrough data for standard_qp_callback */
+typedef struct
+{
+ List *activeWindows; /* active windows, if any */
+ List *groupClause; /* overrides parse->groupClause */
+} standard_qp_extra;
+
+/*
+ * Data specific to grouping sets
+ */
+
+typedef struct
+{
+ List *rollups;
+ List *hash_sets_idx;
+ double dNumHashGroups;
+ bool any_hashable;
+ Bitmapset *unsortable_refs;
+ Bitmapset *unhashable_refs;
+ List *unsortable_sets;
+ int *tleref_to_colnum_map;
+} grouping_sets_data;
+
+/*
+ * Temporary structure for use during WindowClause reordering in order to be
+ * able to sort WindowClauses on partitioning/ordering prefix.
+ */
+typedef struct
+{
+ WindowClause *wc;
+ List *uniqueOrder; /* A List of unique ordering/partitioning
+ * clauses per Window */
+} WindowClauseSortData;
+
+/* Local functions */
+static Node *preprocess_expression(PlannerInfo *root, Node *expr, int kind);
+static void preprocess_qual_conditions(PlannerInfo *root, Node *jtnode);
+static void grouping_planner(PlannerInfo *root, double tuple_fraction);
+static grouping_sets_data *preprocess_grouping_sets(PlannerInfo *root);
+static List *remap_to_groupclause_idx(List *groupClause, List *gsets,
+ int *tleref_to_colnum_map);
+static void preprocess_rowmarks(PlannerInfo *root);
+static double preprocess_limit(PlannerInfo *root,
+ double tuple_fraction,
+ int64 *offset_est, int64 *count_est);
+static void remove_useless_groupby_columns(PlannerInfo *root);
+static List *preprocess_groupclause(PlannerInfo *root, List *force);
+static List *extract_rollup_sets(List *groupingSets);
+static List *reorder_grouping_sets(List *groupingSets, List *sortclause);
+static void standard_qp_callback(PlannerInfo *root, void *extra);
+static double get_number_of_groups(PlannerInfo *root,
+ double path_rows,
+ grouping_sets_data *gd,
+ List *target_list);
+static RelOptInfo *create_grouping_paths(PlannerInfo *root,
+ RelOptInfo *input_rel,
+ PathTarget *target,
+ bool target_parallel_safe,
+ grouping_sets_data *gd);
+static bool is_degenerate_grouping(PlannerInfo *root);
+static void create_degenerate_grouping_paths(PlannerInfo *root,
+ RelOptInfo *input_rel,
+ RelOptInfo *grouped_rel);
+static RelOptInfo *make_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
+ PathTarget *target, bool target_parallel_safe,
+ Node *havingQual);
+static void create_ordinary_grouping_paths(PlannerInfo *root,
+ RelOptInfo *input_rel,
+ RelOptInfo *grouped_rel,
+ const AggClauseCosts *agg_costs,
+ grouping_sets_data *gd,
+ GroupPathExtraData *extra,
+ RelOptInfo **partially_grouped_rel_p);
+static void consider_groupingsets_paths(PlannerInfo *root,
+ RelOptInfo *grouped_rel,
+ Path *path,
+ bool is_sorted,
+ bool can_hash,
+ grouping_sets_data *gd,
+ const AggClauseCosts *agg_costs,
+ double dNumGroups);
+static RelOptInfo *create_window_paths(PlannerInfo *root,
+ RelOptInfo *input_rel,
+ PathTarget *input_target,
+ PathTarget *output_target,
+ bool output_target_parallel_safe,
+ WindowFuncLists *wflists,
+ List *activeWindows);
+static void create_one_window_path(PlannerInfo *root,
+ RelOptInfo *window_rel,
+ Path *path,
+ PathTarget *input_target,
+ PathTarget *output_target,
+ WindowFuncLists *wflists,
+ List *activeWindows);
+static RelOptInfo *create_distinct_paths(PlannerInfo *root,
+ RelOptInfo *input_rel);
+static void create_partial_distinct_paths(PlannerInfo *root,
+ RelOptInfo *input_rel,
+ RelOptInfo *final_distinct_rel);
+static RelOptInfo *create_final_distinct_paths(PlannerInfo *root,
+ RelOptInfo *input_rel,
+ RelOptInfo *distinct_rel);
+static RelOptInfo *create_ordered_paths(PlannerInfo *root,
+ RelOptInfo *input_rel,
+ PathTarget *target,
+ bool target_parallel_safe,
+ double limit_tuples);
+static PathTarget *make_group_input_target(PlannerInfo *root,
+ PathTarget *final_target);
+static PathTarget *make_partial_grouping_target(PlannerInfo *root,
+ PathTarget *grouping_target,
+ Node *havingQual);
+static List *postprocess_setop_tlist(List *new_tlist, List *orig_tlist);
+static List *select_active_windows(PlannerInfo *root, WindowFuncLists *wflists);
+static PathTarget *make_window_input_target(PlannerInfo *root,
+ PathTarget *final_target,
+ List *activeWindows);
+static List *make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc,
+ List *tlist);
+static PathTarget *make_sort_input_target(PlannerInfo *root,
+ PathTarget *final_target,
+ bool *have_postponed_srfs);
+static void adjust_paths_for_srfs(PlannerInfo *root, RelOptInfo *rel,
+ List *targets, List *targets_contain_srfs);
+static void add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
+ RelOptInfo *grouped_rel,
+ RelOptInfo *partially_grouped_rel,
+ const AggClauseCosts *agg_costs,
+ grouping_sets_data *gd,
+ double dNumGroups,
+ GroupPathExtraData *extra);
+static RelOptInfo *create_partial_grouping_paths(PlannerInfo *root,
+ RelOptInfo *grouped_rel,
+ RelOptInfo *input_rel,
+ grouping_sets_data *gd,
+ GroupPathExtraData *extra,
+ bool force_rel_creation);
+static void gather_grouping_paths(PlannerInfo *root, RelOptInfo *rel);
+static bool can_partial_agg(PlannerInfo *root);
+static void apply_scanjoin_target_to_paths(PlannerInfo *root,
+ RelOptInfo *rel,
+ List *scanjoin_targets,
+ List *scanjoin_targets_contain_srfs,
+ bool scanjoin_target_parallel_safe,
+ bool tlist_same_exprs);
+static void create_partitionwise_grouping_paths(PlannerInfo *root,
+ RelOptInfo *input_rel,
+ RelOptInfo *grouped_rel,
+ RelOptInfo *partially_grouped_rel,
+ const AggClauseCosts *agg_costs,
+ grouping_sets_data *gd,
+ PartitionwiseAggregateType patype,
+ GroupPathExtraData *extra);
+static bool group_by_has_partkey(RelOptInfo *input_rel,
+ List *targetList,
+ List *groupClause);
+static int common_prefix_cmp(const void *a, const void *b);
+
+
+/*****************************************************************************
+ *
+ * Query optimizer entry point
+ *
+ * To support loadable plugins that monitor or modify planner behavior,
+ * we provide a hook variable that lets a plugin get control before and
+ * after the standard planning process. The plugin would normally call
+ * standard_planner().
+ *
+ * Note to plugin authors: standard_planner() scribbles on its Query input,
+ * so you'd better copy that data structure if you want to plan more than once.
+ *
+ *****************************************************************************/
+PlannedStmt *
+planner(Query *parse, const char *query_string, int cursorOptions,
+ ParamListInfo boundParams)
+{
+ PlannedStmt *result;
+
+ if (planner_hook)
+ result = (*planner_hook) (parse, query_string, cursorOptions, boundParams);
+ else
+ result = standard_planner(parse, query_string, cursorOptions, boundParams);
+ return result;
+}
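To make the hook mechanism above concrete, here is a minimal, hypothetical extension sketch (the names my_planner and prev_planner_hook are illustrative, not from this patch) that installs planner_hook, chains to any previously installed hook, and otherwise falls back to standard_planner():

#include "postgres.h"

#include "fmgr.h"
#include "optimizer/planner.h"

PG_MODULE_MAGIC;

/* previously installed hook, if any */
static planner_hook_type prev_planner_hook = NULL;

static PlannedStmt *
my_planner(Query *parse, const char *query_string, int cursorOptions,
		   ParamListInfo boundParams)
{
	/* ... examine or adjust the Query here ... */
	if (prev_planner_hook)
		return prev_planner_hook(parse, query_string, cursorOptions,
								 boundParams);
	return standard_planner(parse, query_string, cursorOptions, boundParams);
}

void
_PG_init(void)
{
	prev_planner_hook = planner_hook;
	planner_hook = my_planner;
}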
+
+PlannedStmt *
+standard_planner(Query *parse, const char *query_string, int cursorOptions,
+ ParamListInfo boundParams)
+{
+ PlannedStmt *result;
+ PlannerGlobal *glob;
+ double tuple_fraction;
+ PlannerInfo *root;
+ RelOptInfo *final_rel;
+ Path *best_path;
+ Plan *top_plan;
+ ListCell *lp,
+ *lr;
+
+ /*
+ * Set up global state for this planner invocation. This data is needed
+ * across all levels of sub-Query that might exist in the given command,
+ * so we keep it in a separate struct that's linked to by each per-Query
+ * PlannerInfo.
+ */
+ glob = makeNode(PlannerGlobal);
+
+ glob->boundParams = boundParams;
+ glob->subplans = NIL;
+ glob->subroots = NIL;
+ glob->rewindPlanIDs = NULL;
+ glob->finalrtable = NIL;
+ glob->finalrowmarks = NIL;
+ glob->resultRelations = NIL;
+ glob->appendRelations = NIL;
+ glob->relationOids = NIL;
+ glob->invalItems = NIL;
+ glob->paramExecTypes = NIL;
+ glob->lastPHId = 0;
+ glob->lastRowMarkId = 0;
+ glob->lastPlanNodeId = 0;
+ glob->transientPlan = false;
+ glob->dependsOnRole = false;
+
+ /*
+ * Assess whether it's feasible to use parallel mode for this query. We
+ * can't do this in a standalone backend, or if the command will try to
+ * modify any data, or if this is a cursor operation, or if GUCs are set
+ * to values that don't permit parallelism, or if parallel-unsafe
+ * functions are present in the query tree.
+ *
+ * (Note that we do allow CREATE TABLE AS, SELECT INTO, and CREATE
+ * MATERIALIZED VIEW to use parallel plans, but this is safe only because
+ * the command is writing into a completely new table which workers won't
+ * be able to see. If the workers could see the table, the fact that
+ * group locking would cause them to ignore the leader's heavyweight
+ * GIN page locks would make this unsafe. We'll have to fix that somehow
+ * if we want to allow parallel inserts in general; updates and deletes
+ * have additional problems especially around combo CIDs.)
+ *
+ * For now, we don't try to use parallel mode if we're running inside a
+ * parallel worker. We might eventually be able to relax this
+ * restriction, but for now it seems best not to have parallel workers
+ * trying to create their own parallel workers.
+ */
+ if ((cursorOptions & CURSOR_OPT_PARALLEL_OK) != 0 &&
+ IsUnderPostmaster &&
+ parse->commandType == CMD_SELECT &&
+ !parse->hasModifyingCTE &&
+ max_parallel_workers_per_gather > 0 &&
+ !IsParallelWorker())
+ {
+ /* all the cheap tests pass, so scan the query tree */
+ glob->maxParallelHazard = max_parallel_hazard(parse);
+ glob->parallelModeOK = (glob->maxParallelHazard != PROPARALLEL_UNSAFE);
+ }
+ else
+ {
+ /* skip the query tree scan, just assume it's unsafe */
+ glob->maxParallelHazard = PROPARALLEL_UNSAFE;
+ glob->parallelModeOK = false;
+ }
+
+ /*
+ * glob->parallelModeNeeded is normally set to false here and changed to
+ * true during plan creation if a Gather or Gather Merge plan is actually
+ * created (cf. create_gather_plan, create_gather_merge_plan).
+ *
+ * However, if force_parallel_mode = on or force_parallel_mode = regress,
+ * then we impose parallel mode whenever it's safe to do so, even if the
+ * final plan doesn't use parallelism. It's not safe to do so if the
+ * query contains anything parallel-unsafe; parallelModeOK will be false
+ * in that case. Note that parallelModeOK can't change after this point.
+ * Otherwise, everything in the query is either parallel-safe or
+ * parallel-restricted, and in either case it should be OK to impose
+ * parallel-mode restrictions. If that ends up breaking something, then
+ * either some function the user included in the query is incorrectly
+ * labeled as parallel-safe or parallel-restricted when in reality it's
+ * parallel-unsafe, or else the query planner itself has a bug.
+ */
+ glob->parallelModeNeeded = glob->parallelModeOK &&
+ (force_parallel_mode != FORCE_PARALLEL_OFF);
+
+ /* Determine what fraction of the plan is likely to be scanned */
+ if (cursorOptions & CURSOR_OPT_FAST_PLAN)
+ {
+ /*
+ * We have no real idea how many tuples the user will ultimately FETCH
+ * from a cursor, but it is often the case that he doesn't want 'em
+ * all, or would prefer a fast-start plan anyway so that he can
+ * process some of the tuples sooner. Use a GUC parameter to decide
+ * what fraction to optimize for.
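+		 * (For instance, with cursor_tuple_fraction = 0.1 we optimize on
+		 * the assumption that only about 10% of the cursor's rows will
+		 * actually be fetched.)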
+ */
+ tuple_fraction = cursor_tuple_fraction;
+
+ /*
+ * We document cursor_tuple_fraction as simply being a fraction, which
+ * means the edge cases 0 and 1 have to be treated specially here. We
+ * convert 1 to 0 ("all the tuples") and 0 to a very small fraction.
+ */
+ if (tuple_fraction >= 1.0)
+ tuple_fraction = 0.0;
+ else if (tuple_fraction <= 0.0)
+ tuple_fraction = 1e-10;
+ }
+ else
+ {
+ /* Default assumption is we need all the tuples */
+ tuple_fraction = 0.0;
+ }
+
+ /* primary planning entry point (may recurse for subqueries) */
+ root = subquery_planner(glob, parse, NULL,
+ false, tuple_fraction);
+
+ /* Select best Path and turn it into a Plan */
+ final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
+ best_path = get_cheapest_fractional_path(final_rel, tuple_fraction);
+
+ top_plan = create_plan(root, best_path);
+
+ /*
+ * If creating a plan for a scrollable cursor, make sure it can run
+ * backwards on demand. Add a Material node at the top at need.
+ */
+ if (cursorOptions & CURSOR_OPT_SCROLL)
+ {
+ if (!ExecSupportsBackwardScan(top_plan))
+ top_plan = materialize_finished_plan(top_plan);
+ }
+
+ /*
+ * Optionally add a Gather node for testing purposes, provided this is
+ * actually a safe thing to do.
+ */
+ if (force_parallel_mode != FORCE_PARALLEL_OFF && top_plan->parallel_safe)
+ {
+ Gather *gather = makeNode(Gather);
+
+ /*
+ * Top plan must not have any initPlans, else it shouldn't have been
+ * marked parallel-safe.
+ */
+ Assert(top_plan->initPlan == NIL);
+
+ gather->plan.targetlist = top_plan->targetlist;
+ gather->plan.qual = NIL;
+ gather->plan.lefttree = top_plan;
+ gather->plan.righttree = NULL;
+ gather->num_workers = 1;
+ gather->single_copy = true;
+ gather->invisible = (force_parallel_mode == FORCE_PARALLEL_REGRESS);
+
+ /*
+ * Since this Gather has no parallel-aware descendants to signal to,
+ * we don't need a rescan Param.
+ */
+ gather->rescan_param = -1;
+
+ /*
+ * Ideally we'd use cost_gather here, but setting up dummy path data
+ * to satisfy it doesn't seem much cleaner than knowing what it does.
+ */
+ gather->plan.startup_cost = top_plan->startup_cost +
+ parallel_setup_cost;
+ gather->plan.total_cost = top_plan->total_cost +
+ parallel_setup_cost + parallel_tuple_cost * top_plan->plan_rows;
+ gather->plan.plan_rows = top_plan->plan_rows;
+ gather->plan.plan_width = top_plan->plan_width;
+ gather->plan.parallel_aware = false;
+ gather->plan.parallel_safe = false;
+
+ /* use parallel mode for parallel plans. */
+ root->glob->parallelModeNeeded = true;
+
+ top_plan = &gather->plan;
+ }
+
+ /*
+ * If any Params were generated, run through the plan tree and compute
+ * each plan node's extParam/allParam sets. Ideally we'd merge this into
+ * set_plan_references' tree traversal, but for now it has to be separate
+	 * because we need to visit subplans before, not after, the main plan.
+ */
+ if (glob->paramExecTypes != NIL)
+ {
+ Assert(list_length(glob->subplans) == list_length(glob->subroots));
+ forboth(lp, glob->subplans, lr, glob->subroots)
+ {
+ Plan *subplan = (Plan *) lfirst(lp);
+ PlannerInfo *subroot = lfirst_node(PlannerInfo, lr);
+
+ SS_finalize_plan(subroot, subplan);
+ }
+ SS_finalize_plan(root, top_plan);
+ }
+
+ /* final cleanup of the plan */
+ Assert(glob->finalrtable == NIL);
+ Assert(glob->finalrowmarks == NIL);
+ Assert(glob->resultRelations == NIL);
+ Assert(glob->appendRelations == NIL);
+ top_plan = set_plan_references(root, top_plan);
+ /* ... and the subplans (both regular subplans and initplans) */
+ Assert(list_length(glob->subplans) == list_length(glob->subroots));
+ forboth(lp, glob->subplans, lr, glob->subroots)
+ {
+ Plan *subplan = (Plan *) lfirst(lp);
+ PlannerInfo *subroot = lfirst_node(PlannerInfo, lr);
+
+ lfirst(lp) = set_plan_references(subroot, subplan);
+ }
+
+ /* build the PlannedStmt result */
+ result = makeNode(PlannedStmt);
+
+ result->commandType = parse->commandType;
+ result->queryId = parse->queryId;
+ result->hasReturning = (parse->returningList != NIL);
+ result->hasModifyingCTE = parse->hasModifyingCTE;
+ result->canSetTag = parse->canSetTag;
+ result->transientPlan = glob->transientPlan;
+ result->dependsOnRole = glob->dependsOnRole;
+ result->parallelModeNeeded = glob->parallelModeNeeded;
+ result->planTree = top_plan;
+ result->rtable = glob->finalrtable;
+ result->resultRelations = glob->resultRelations;
+ result->appendRelations = glob->appendRelations;
+ result->subplans = glob->subplans;
+ result->rewindPlanIDs = glob->rewindPlanIDs;
+ result->rowMarks = glob->finalrowmarks;
+ result->relationOids = glob->relationOids;
+ result->invalItems = glob->invalItems;
+ result->paramExecTypes = glob->paramExecTypes;
+ /* utilityStmt should be null, but we might as well copy it */
+ result->utilityStmt = parse->utilityStmt;
+ result->stmt_location = parse->stmt_location;
+ result->stmt_len = parse->stmt_len;
+
+ result->jitFlags = PGJIT_NONE;
+ if (jit_enabled && jit_above_cost >= 0 &&
+ top_plan->total_cost > jit_above_cost)
+ {
+ result->jitFlags |= PGJIT_PERFORM;
+
+ /*
+ * Decide how much effort should be put into generating better code.
+ */
+ if (jit_optimize_above_cost >= 0 &&
+ top_plan->total_cost > jit_optimize_above_cost)
+ result->jitFlags |= PGJIT_OPT3;
+ if (jit_inline_above_cost >= 0 &&
+ top_plan->total_cost > jit_inline_above_cost)
+ result->jitFlags |= PGJIT_INLINE;
+
+ /*
+ * Decide which operations should be JITed.
+ */
+ if (jit_expressions)
+ result->jitFlags |= PGJIT_EXPR;
+ if (jit_tuple_deforming)
+ result->jitFlags |= PGJIT_DEFORM;
+ }
+
+ if (glob->partition_directory != NULL)
+ DestroyPartitionDirectory(glob->partition_directory);
+
+ return result;
+}
+
+
+/*--------------------
+ * subquery_planner
+ * Invokes the planner on a subquery. We recurse to here for each
+ * sub-SELECT found in the query tree.
+ *
+ * glob is the global state for the current planner run.
+ * parse is the querytree produced by the parser & rewriter.
+ * parent_root is the immediate parent Query's info (NULL at the top level).
+ * hasRecursion is true if this is a recursive WITH query.
+ * tuple_fraction is the fraction of tuples we expect will be retrieved.
+ * tuple_fraction is interpreted as explained for grouping_planner, below.
+ *
+ * Basically, this routine does the stuff that should only be done once
+ * per Query object. It then calls grouping_planner. At one time,
+ * grouping_planner could be invoked recursively on the same Query object;
+ * that's not currently true, but we keep the separation between the two
+ * routines anyway, in case we need it again someday.
+ *
+ * subquery_planner will be called recursively to handle sub-Query nodes
+ * found within the query's expressions and rangetable.
+ *
+ * Returns the PlannerInfo struct ("root") that contains all data generated
+ * while planning the subquery. In particular, the Path(s) attached to
+ * the (UPPERREL_FINAL, NULL) upperrel represent our conclusions about the
+ * cheapest way(s) to implement the query. The top level will select the
+ * best Path and pass it through createplan.c to produce a finished Plan.
+ *--------------------
+ */
+PlannerInfo *
+subquery_planner(PlannerGlobal *glob, Query *parse,
+ PlannerInfo *parent_root,
+ bool hasRecursion, double tuple_fraction)
+{
+ PlannerInfo *root;
+ List *newWithCheckOptions;
+ List *newHaving;
+ bool hasOuterJoins;
+ bool hasResultRTEs;
+ RelOptInfo *final_rel;
+ ListCell *l;
+
+ /* Create a PlannerInfo data structure for this subquery */
+ root = makeNode(PlannerInfo);
+ root->parse = parse;
+ root->glob = glob;
+ root->query_level = parent_root ? parent_root->query_level + 1 : 1;
+ root->parent_root = parent_root;
+ root->plan_params = NIL;
+ root->outer_params = NULL;
+ root->planner_cxt = CurrentMemoryContext;
+ root->init_plans = NIL;
+ root->cte_plan_ids = NIL;
+ root->multiexpr_params = NIL;
+ root->eq_classes = NIL;
+ root->ec_merging_done = false;
+ root->all_result_relids =
+ parse->resultRelation ? bms_make_singleton(parse->resultRelation) : NULL;
+ root->leaf_result_relids = NULL; /* we'll find out leaf-ness later */
+ root->append_rel_list = NIL;
+ root->row_identity_vars = NIL;
+ root->rowMarks = NIL;
+ memset(root->upper_rels, 0, sizeof(root->upper_rels));
+ memset(root->upper_targets, 0, sizeof(root->upper_targets));
+ root->processed_tlist = NIL;
+ root->update_colnos = NIL;
+ root->grouping_map = NULL;
+ root->minmax_aggs = NIL;
+ root->qual_security_level = 0;
+ root->hasPseudoConstantQuals = false;
+ root->hasAlternativeSubPlans = false;
+ root->hasRecursion = hasRecursion;
+ if (hasRecursion)
+ root->wt_param_id = assign_special_exec_param(root);
+ else
+ root->wt_param_id = -1;
+ root->non_recursive_path = NULL;
+ root->partColsUpdated = false;
+
+ /*
+ * If there is a WITH list, process each WITH query and either convert it
+ * to RTE_SUBQUERY RTE(s) or build an initplan SubPlan structure for it.
+ */
+ if (parse->cteList)
+ SS_process_ctes(root);
+
+ /*
+ * If it's a MERGE command, transform the joinlist as appropriate.
+ */
+ transform_MERGE_to_join(parse);
+
+ /*
+ * If the FROM clause is empty, replace it with a dummy RTE_RESULT RTE, so
+ * that we don't need so many special cases to deal with that situation.
+ */
+ replace_empty_jointree(parse);
+
+ /*
+ * Look for ANY and EXISTS SubLinks in WHERE and JOIN/ON clauses, and try
+ * to transform them into joins. Note that this step does not descend
+ * into subqueries; if we pull up any subqueries below, their SubLinks are
+ * processed just before pulling them up.
+ */
+ if (parse->hasSubLinks)
+ pull_up_sublinks(root);
+
+ /*
+ * Scan the rangetable for function RTEs, do const-simplification on them,
+ * and then inline them if possible (producing subqueries that might get
+ * pulled up next). Recursion issues here are handled in the same way as
+ * for SubLinks.
+ */
+ preprocess_function_rtes(root);
+
+ /*
+ * Check to see if any subqueries in the jointree can be merged into this
+ * query.
+ */
+ pull_up_subqueries(root);
+
+ /*
+ * If this is a simple UNION ALL query, flatten it into an appendrel. We
+ * do this now because it requires applying pull_up_subqueries to the leaf
+ * queries of the UNION ALL, which weren't touched above because they
+ * weren't referenced by the jointree (they will be after we do this).
+ */
+ if (parse->setOperations)
+ flatten_simple_union_all(root);
+
+ /*
+ * Survey the rangetable to see what kinds of entries are present. We can
+ * skip some later processing if relevant SQL features are not used; for
+ * example if there are no JOIN RTEs we can avoid the expense of doing
+ * flatten_join_alias_vars(). This must be done after we have finished
+ * adding rangetable entries, of course. (Note: actually, processing of
+ * inherited or partitioned rels can cause RTEs for their child tables to
+ * get added later; but those must all be RTE_RELATION entries, so they
+ * don't invalidate the conclusions drawn here.)
+ */
+ root->hasJoinRTEs = false;
+ root->hasLateralRTEs = false;
+ hasOuterJoins = false;
+ hasResultRTEs = false;
+ foreach(l, parse->rtable)
+ {
+ RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
+
+ switch (rte->rtekind)
+ {
+ case RTE_RELATION:
+ if (rte->inh)
+ {
+ /*
+ * Check to see if the relation actually has any children;
+ * if not, clear the inh flag so we can treat it as a
+ * plain base relation.
+ *
+ * Note: this could give a false-positive result, if the
+ * rel once had children but no longer does. We used to
+ * be able to clear rte->inh later on when we discovered
+ * that, but no more; we have to handle such cases as
+ * full-fledged inheritance.
+ */
+ rte->inh = has_subclass(rte->relid);
+ }
+ break;
+ case RTE_JOIN:
+ root->hasJoinRTEs = true;
+ if (IS_OUTER_JOIN(rte->jointype))
+ hasOuterJoins = true;
+ break;
+ case RTE_RESULT:
+ hasResultRTEs = true;
+ break;
+ default:
+ /* No work here for other RTE types */
+ break;
+ }
+
+ if (rte->lateral)
+ root->hasLateralRTEs = true;
+
+ /*
+ * We can also determine the maximum security level required for any
+ * securityQuals now. Addition of inheritance-child RTEs won't affect
+ * this, because child tables don't have their own securityQuals; see
+ * expand_single_inheritance_child().
+ */
+ if (rte->securityQuals)
+ root->qual_security_level = Max(root->qual_security_level,
+ list_length(rte->securityQuals));
+ }
+
+ /*
+ * If we have now verified that the query target relation is
+ * non-inheriting, mark it as a leaf target.
+ */
+ if (parse->resultRelation)
+ {
+ RangeTblEntry *rte = rt_fetch(parse->resultRelation, parse->rtable);
+
+ if (!rte->inh)
+ root->leaf_result_relids =
+ bms_make_singleton(parse->resultRelation);
+ }
+
+ /*
+ * Preprocess RowMark information. We need to do this after subquery
+ * pullup, so that all base relations are present.
+ */
+ preprocess_rowmarks(root);
+
+ /*
+ * Set hasHavingQual to remember if HAVING clause is present. Needed
+ * because preprocess_expression will reduce a constant-true condition to
+ * an empty qual list ... but "HAVING TRUE" is not a semantic no-op.
+ */
+ root->hasHavingQual = (parse->havingQual != NULL);
+
+ /*
+ * Do expression preprocessing on targetlist and quals, as well as other
+ * random expressions in the querytree. Note that we do not need to
+ * handle sort/group expressions explicitly, because they are actually
+ * part of the targetlist.
+ */
+ parse->targetList = (List *)
+ preprocess_expression(root, (Node *) parse->targetList,
+ EXPRKIND_TARGET);
+
+ /* Constant-folding might have removed all set-returning functions */
+ if (parse->hasTargetSRFs)
+ parse->hasTargetSRFs = expression_returns_set((Node *) parse->targetList);
+
+ newWithCheckOptions = NIL;
+ foreach(l, parse->withCheckOptions)
+ {
+ WithCheckOption *wco = lfirst_node(WithCheckOption, l);
+
+ wco->qual = preprocess_expression(root, wco->qual,
+ EXPRKIND_QUAL);
+ if (wco->qual != NULL)
+ newWithCheckOptions = lappend(newWithCheckOptions, wco);
+ }
+ parse->withCheckOptions = newWithCheckOptions;
+
+ parse->returningList = (List *)
+ preprocess_expression(root, (Node *) parse->returningList,
+ EXPRKIND_TARGET);
+
+ preprocess_qual_conditions(root, (Node *) parse->jointree);
+
+ parse->havingQual = preprocess_expression(root, parse->havingQual,
+ EXPRKIND_QUAL);
+
+ foreach(l, parse->windowClause)
+ {
+ WindowClause *wc = lfirst_node(WindowClause, l);
+
+ /* partitionClause/orderClause are sort/group expressions */
+ wc->startOffset = preprocess_expression(root, wc->startOffset,
+ EXPRKIND_LIMIT);
+ wc->endOffset = preprocess_expression(root, wc->endOffset,
+ EXPRKIND_LIMIT);
+ wc->runCondition = (List *) preprocess_expression(root,
+ (Node *) wc->runCondition,
+ EXPRKIND_TARGET);
+ }
+
+ parse->limitOffset = preprocess_expression(root, parse->limitOffset,
+ EXPRKIND_LIMIT);
+ parse->limitCount = preprocess_expression(root, parse->limitCount,
+ EXPRKIND_LIMIT);
+
+ if (parse->onConflict)
+ {
+ parse->onConflict->arbiterElems = (List *)
+ preprocess_expression(root,
+ (Node *) parse->onConflict->arbiterElems,
+ EXPRKIND_ARBITER_ELEM);
+ parse->onConflict->arbiterWhere =
+ preprocess_expression(root,
+ parse->onConflict->arbiterWhere,
+ EXPRKIND_QUAL);
+ parse->onConflict->onConflictSet = (List *)
+ preprocess_expression(root,
+ (Node *) parse->onConflict->onConflictSet,
+ EXPRKIND_TARGET);
+ parse->onConflict->onConflictWhere =
+ preprocess_expression(root,
+ parse->onConflict->onConflictWhere,
+ EXPRKIND_QUAL);
+ /* exclRelTlist contains only Vars, so no preprocessing needed */
+ }
+
+ foreach(l, parse->mergeActionList)
+ {
+ MergeAction *action = (MergeAction *) lfirst(l);
+
+ action->targetList = (List *)
+ preprocess_expression(root,
+ (Node *) action->targetList,
+ EXPRKIND_TARGET);
+ action->qual =
+ preprocess_expression(root,
+ (Node *) action->qual,
+ EXPRKIND_QUAL);
+ }
+
+ root->append_rel_list = (List *)
+ preprocess_expression(root, (Node *) root->append_rel_list,
+ EXPRKIND_APPINFO);
+
+ /* Also need to preprocess expressions within RTEs */
+ foreach(l, parse->rtable)
+ {
+ RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
+ int kind;
+ ListCell *lcsq;
+
+ if (rte->rtekind == RTE_RELATION)
+ {
+ if (rte->tablesample)
+ rte->tablesample = (TableSampleClause *)
+ preprocess_expression(root,
+ (Node *) rte->tablesample,
+ EXPRKIND_TABLESAMPLE);
+ }
+ else if (rte->rtekind == RTE_SUBQUERY)
+ {
+ /*
+ * We don't want to do all preprocessing yet on the subquery's
+ * expressions, since that will happen when we plan it. But if it
+ * contains any join aliases of our level, those have to get
+ * expanded now, because planning of the subquery won't do it.
+ * That's only possible if the subquery is LATERAL.
+ */
+ if (rte->lateral && root->hasJoinRTEs)
+ rte->subquery = (Query *)
+ flatten_join_alias_vars(root->parse,
+ (Node *) rte->subquery);
+ }
+ else if (rte->rtekind == RTE_FUNCTION)
+ {
+ /* Preprocess the function expression(s) fully */
+ kind = rte->lateral ? EXPRKIND_RTFUNC_LATERAL : EXPRKIND_RTFUNC;
+ rte->functions = (List *)
+ preprocess_expression(root, (Node *) rte->functions, kind);
+ }
+ else if (rte->rtekind == RTE_TABLEFUNC)
+ {
+ /* Preprocess the function expression(s) fully */
+ kind = rte->lateral ? EXPRKIND_TABLEFUNC_LATERAL : EXPRKIND_TABLEFUNC;
+ rte->tablefunc = (TableFunc *)
+ preprocess_expression(root, (Node *) rte->tablefunc, kind);
+ }
+ else if (rte->rtekind == RTE_VALUES)
+ {
+ /* Preprocess the values lists fully */
+ kind = rte->lateral ? EXPRKIND_VALUES_LATERAL : EXPRKIND_VALUES;
+ rte->values_lists = (List *)
+ preprocess_expression(root, (Node *) rte->values_lists, kind);
+ }
+
+ /*
+ * Process each element of the securityQuals list as if it were a
+ * separate qual expression (as indeed it is). We need to do it this
+ * way to get proper canonicalization of AND/OR structure. Note that
+ * this converts each element into an implicit-AND sublist.
+ */
+ foreach(lcsq, rte->securityQuals)
+ {
+ lfirst(lcsq) = preprocess_expression(root,
+ (Node *) lfirst(lcsq),
+ EXPRKIND_QUAL);
+ }
+ }
+
+ /*
+ * Now that we are done preprocessing expressions, and in particular done
+ * flattening join alias variables, get rid of the joinaliasvars lists.
+ * They no longer match what expressions in the rest of the tree look
+ * like, because we have not preprocessed expressions in those lists (and
+ * do not want to; for example, expanding a SubLink there would result in
+ * a useless unreferenced subplan). Leaving them in place simply creates
+ * a hazard for later scans of the tree. We could try to prevent that by
+ * using QTW_IGNORE_JOINALIASES in every tree scan done after this point,
+ * but that doesn't sound very reliable.
+ */
+ if (root->hasJoinRTEs)
+ {
+ foreach(l, parse->rtable)
+ {
+ RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
+
+ rte->joinaliasvars = NIL;
+ }
+ }
+
+ /*
+ * In some cases we may want to transfer a HAVING clause into WHERE. We
+ * cannot do so if the HAVING clause contains aggregates (obviously) or
+ * volatile functions (since a HAVING clause is supposed to be executed
+ * only once per group). We also can't do this if there are any nonempty
+ * grouping sets; moving such a clause into WHERE would potentially change
+ * the results, if any referenced column isn't present in all the grouping
+ * sets. (If there are only empty grouping sets, then the HAVING clause
+ * must be degenerate as discussed below.)
+ *
+ * Also, it may be that the clause is so expensive to execute that we're
+ * better off doing it only once per group, despite the loss of
+ * selectivity. This is hard to estimate short of doing the entire
+ * planning process twice, so we use a heuristic: clauses containing
+ * subplans are left in HAVING. Otherwise, we move or copy the HAVING
+ * clause into WHERE, in hopes of eliminating tuples before aggregation
+ * instead of after.
+ *
+ * If the query has explicit grouping then we can simply move such a
+ * clause into WHERE; any group that fails the clause will not be in the
+ * output because none of its tuples will reach the grouping or
+ * aggregation stage. Otherwise we must have a degenerate (variable-free)
+ * HAVING clause, which we put in WHERE so that query_planner() can use it
+ * in a gating Result node, but also keep in HAVING to ensure that we
+ * don't emit a bogus aggregated row. (This could be done better, but it
+ * seems not worth optimizing.)
+ *
+ * Note that both havingQual and parse->jointree->quals are in
+ * implicitly-ANDed-list form at this point, even though they are declared
+ * as Node *.
+ */
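+	/*
+	 * For example, in "SELECT x, sum(y) FROM t GROUP BY x HAVING x > 0",
+	 * the clause "x > 0" contains no aggregates, volatile functions, or
+	 * subplans, so it can be moved into WHERE; a clause such as
+	 * "sum(y) > 0" must stay in HAVING.
+	 */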
+ newHaving = NIL;
+ foreach(l, (List *) parse->havingQual)
+ {
+ Node *havingclause = (Node *) lfirst(l);
+
+ if ((parse->groupClause && parse->groupingSets) ||
+ contain_agg_clause(havingclause) ||
+ contain_volatile_functions(havingclause) ||
+ contain_subplans(havingclause))
+ {
+ /* keep it in HAVING */
+ newHaving = lappend(newHaving, havingclause);
+ }
+ else if (parse->groupClause && !parse->groupingSets)
+ {
+ /* move it to WHERE */
+ parse->jointree->quals = (Node *)
+ lappend((List *) parse->jointree->quals, havingclause);
+ }
+ else
+ {
+ /* put a copy in WHERE, keep it in HAVING */
+ parse->jointree->quals = (Node *)
+ lappend((List *) parse->jointree->quals,
+ copyObject(havingclause));
+ newHaving = lappend(newHaving, havingclause);
+ }
+ }
+ parse->havingQual = (Node *) newHaving;
+
+ /* Remove any redundant GROUP BY columns */
+ remove_useless_groupby_columns(root);
+
+ /*
+ * If we have any outer joins, try to reduce them to plain inner joins.
+ * This step is most easily done after we've done expression
+ * preprocessing.
+ */
+ if (hasOuterJoins)
+ reduce_outer_joins(root);
+
+ /*
+ * If we have any RTE_RESULT relations, see if they can be deleted from
+ * the jointree. This step is most effectively done after we've done
+ * expression preprocessing and outer join reduction.
+ */
+ if (hasResultRTEs)
+ remove_useless_result_rtes(root);
+
+ /*
+ * Do the main planning.
+ */
+ grouping_planner(root, tuple_fraction);
+
+ /*
+ * Capture the set of outer-level param IDs we have access to, for use in
+ * extParam/allParam calculations later.
+ */
+ SS_identify_outer_params(root);
+
+ /*
+ * If any initPlans were created in this query level, adjust the surviving
+ * Paths' costs and parallel-safety flags to account for them. The
+ * initPlans won't actually get attached to the plan tree till
+ * create_plan() runs, but we must include their effects now.
+ */
+ final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
+ SS_charge_for_initplans(root, final_rel);
+
+ /*
+ * Make sure we've identified the cheapest Path for the final rel. (By
+ * doing this here not in grouping_planner, we include initPlan costs in
+ * the decision, though it's unlikely that will change anything.)
+ */
+ set_cheapest(final_rel);
+
+ return root;
+}
+
+/*
+ * preprocess_expression
+ * Do subquery_planner's preprocessing work for an expression,
+ * which can be a targetlist, a WHERE clause (including JOIN/ON
+ * conditions), a HAVING clause, or a few other things.
+ */
+static Node *
+preprocess_expression(PlannerInfo *root, Node *expr, int kind)
+{
+ /*
+ * Fall out quickly if expression is empty. This occurs often enough to
+ * be worth checking. Note that null->null is the correct conversion for
+ * implicit-AND result format, too.
+ */
+ if (expr == NULL)
+ return NULL;
+
+ /*
+ * If the query has any join RTEs, replace join alias variables with
+ * base-relation variables. We must do this first, since any expressions
+ * we may extract from the joinaliasvars lists have not been preprocessed.
+ * For example, if we did this after sublink processing, sublinks expanded
+ * out from join aliases would not get processed. But we can skip this in
+ * non-lateral RTE functions, VALUES lists, and TABLESAMPLE clauses, since
+ * they can't contain any Vars of the current query level.
+ */
+ if (root->hasJoinRTEs &&
+ !(kind == EXPRKIND_RTFUNC ||
+ kind == EXPRKIND_VALUES ||
+ kind == EXPRKIND_TABLESAMPLE ||
+ kind == EXPRKIND_TABLEFUNC))
+ expr = flatten_join_alias_vars(root->parse, expr);
+
+ /*
+ * Simplify constant expressions. For function RTEs, this was already
+ * done by preprocess_function_rtes. (But note we must do it again for
+ * EXPRKIND_RTFUNC_LATERAL, because those might by now contain
+ * un-simplified subexpressions inserted by flattening of subqueries or
+ * join alias variables.)
+ *
+ * Note: an essential effect of this is to convert named-argument function
+ * calls to positional notation and insert the current actual values of
+ * any default arguments for functions. To ensure that happens, we *must*
+ * process all expressions here. Previous PG versions sometimes skipped
+ * const-simplification if it didn't seem worth the trouble, but we can't
+ * do that anymore.
+ *
+ * Note: this also flattens nested AND and OR expressions into N-argument
+ * form. All processing of a qual expression after this point must be
+ * careful to maintain AND/OR flatness --- that is, do not generate a tree
+ * with AND directly under AND, nor OR directly under OR.
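+	 * (For example, a qual written as "(a AND (b AND c))" comes out as a
+	 * single three-argument AND.)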
+ */
+ if (kind != EXPRKIND_RTFUNC)
+ expr = eval_const_expressions(root, expr);
+
+ /*
+ * If it's a qual or havingQual, canonicalize it.
+ */
+ if (kind == EXPRKIND_QUAL)
+ {
+ expr = (Node *) canonicalize_qual((Expr *) expr, false);
+
+#ifdef OPTIMIZER_DEBUG
+ printf("After canonicalize_qual()\n");
+ pprint(expr);
+#endif
+ }
+
+ /*
+ * Check for ANY ScalarArrayOpExpr with Const arrays and set the
+ * hashfuncid of any that might execute more quickly by using hash lookups
+ * instead of a linear search.
+ */
+ if (kind == EXPRKIND_QUAL || kind == EXPRKIND_TARGET)
+ {
+ convert_saop_to_hashed_saop(expr);
+ }
+
+ /* Expand SubLinks to SubPlans */
+ if (root->parse->hasSubLinks)
+ expr = SS_process_sublinks(root, expr, (kind == EXPRKIND_QUAL));
+
+ /*
+ * XXX do not insert anything here unless you have grokked the comments in
+ * SS_replace_correlation_vars ...
+ */
+
+ /* Replace uplevel vars with Param nodes (this IS possible in VALUES) */
+ if (root->query_level > 1)
+ expr = SS_replace_correlation_vars(root, expr);
+
+ /*
+ * If it's a qual or havingQual, convert it to implicit-AND format. (We
+ * don't want to do this before eval_const_expressions, since the latter
+ * would be unable to simplify a top-level AND correctly. Also,
+ * SS_process_sublinks expects explicit-AND format.)
+ */
+ if (kind == EXPRKIND_QUAL)
+ expr = (Node *) make_ands_implicit((Expr *) expr);
+
+ return expr;
+}
+
+/*
+ * preprocess_qual_conditions
+ * Recursively scan the query's jointree and do subquery_planner's
+ * preprocessing work on each qual condition found therein.
+ */
+static void
+preprocess_qual_conditions(PlannerInfo *root, Node *jtnode)
+{
+ if (jtnode == NULL)
+ return;
+ if (IsA(jtnode, RangeTblRef))
+ {
+ /* nothing to do here */
+ }
+ else if (IsA(jtnode, FromExpr))
+ {
+ FromExpr *f = (FromExpr *) jtnode;
+ ListCell *l;
+
+ foreach(l, f->fromlist)
+ preprocess_qual_conditions(root, lfirst(l));
+
+ f->quals = preprocess_expression(root, f->quals, EXPRKIND_QUAL);
+ }
+ else if (IsA(jtnode, JoinExpr))
+ {
+ JoinExpr *j = (JoinExpr *) jtnode;
+
+ preprocess_qual_conditions(root, j->larg);
+ preprocess_qual_conditions(root, j->rarg);
+
+ j->quals = preprocess_expression(root, j->quals, EXPRKIND_QUAL);
+ }
+ else
+ elog(ERROR, "unrecognized node type: %d",
+ (int) nodeTag(jtnode));
+}
+
+/*
+ * preprocess_phv_expression
+ * Do preprocessing on a PlaceHolderVar expression that's been pulled up.
+ *
+ * If a LATERAL subquery references an output of another subquery, and that
+ * output must be wrapped in a PlaceHolderVar because of an intermediate outer
+ * join, then we'll push the PlaceHolderVar expression down into the subquery
+ * and later pull it back up during find_lateral_references, which runs after
+ * subquery_planner has preprocessed all the expressions that were in the
+ * current query level to start with. So we need to preprocess it then.
+ */
+Expr *
+preprocess_phv_expression(PlannerInfo *root, Expr *expr)
+{
+ return (Expr *) preprocess_expression(root, (Node *) expr, EXPRKIND_PHV);
+}
+
+/*--------------------
+ * grouping_planner
+ * Perform planning steps related to grouping, aggregation, etc.
+ *
+ * This function adds all required top-level processing to the scan/join
+ * Path(s) produced by query_planner.
+ *
+ * tuple_fraction is the fraction of tuples we expect will be retrieved.
+ * tuple_fraction is interpreted as follows:
+ * 0: expect all tuples to be retrieved (normal case)
+ * 0 < tuple_fraction < 1: expect the given fraction of tuples available
+ * from the plan to be retrieved
+ * tuple_fraction >= 1: tuple_fraction is the absolute number of tuples
+ * expected to be retrieved (ie, a LIMIT specification)
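+ *	(For example, a query with "LIMIT 10" and no OFFSET will normally end
+ *	up being planned with tuple_fraction = 10, once preprocess_limit has
+ *	adjusted the caller-supplied value.)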
+ *
+ * Returns nothing; the useful output is in the Paths we attach to the
+ * (UPPERREL_FINAL, NULL) upperrel in *root. In addition,
+ * root->processed_tlist contains the final processed targetlist.
+ *
+ * Note that we have not done set_cheapest() on the final rel; it's convenient
+ * to leave this to the caller.
+ *--------------------
+ */
+static void
+grouping_planner(PlannerInfo *root, double tuple_fraction)
+{
+ Query *parse = root->parse;
+ int64 offset_est = 0;
+ int64 count_est = 0;
+ double limit_tuples = -1.0;
+ bool have_postponed_srfs = false;
+ PathTarget *final_target;
+ List *final_targets;
+ List *final_targets_contain_srfs;
+ bool final_target_parallel_safe;
+ RelOptInfo *current_rel;
+ RelOptInfo *final_rel;
+ FinalPathExtraData extra;
+ ListCell *lc;
+
+ /* Tweak caller-supplied tuple_fraction if have LIMIT/OFFSET */
+ if (parse->limitCount || parse->limitOffset)
+ {
+ tuple_fraction = preprocess_limit(root, tuple_fraction,
+ &offset_est, &count_est);
+
+ /*
+ * If we have a known LIMIT, and don't have an unknown OFFSET, we can
+ * estimate the effects of using a bounded sort.
+ */
+ if (count_est > 0 && offset_est >= 0)
+ limit_tuples = (double) count_est + (double) offset_est;
+ }
+
+ /* Make tuple_fraction accessible to lower-level routines */
+ root->tuple_fraction = tuple_fraction;
+
+ if (parse->setOperations)
+ {
+ /*
+ * If there's a top-level ORDER BY, assume we have to fetch all the
+ * tuples. This might be too simplistic given all the hackery below
+ * to possibly avoid the sort; but the odds of accurate estimates here
+ * are pretty low anyway. XXX try to get rid of this in favor of
+ * letting plan_set_operations generate both fast-start and
+ * cheapest-total paths.
+ */
+ if (parse->sortClause)
+ root->tuple_fraction = 0.0;
+
+ /*
+ * Construct Paths for set operations. The results will not need any
+ * work except perhaps a top-level sort and/or LIMIT. Note that any
+ * special work for recursive unions is the responsibility of
+ * plan_set_operations.
+ */
+ current_rel = plan_set_operations(root);
+
+ /*
+ * We should not need to call preprocess_targetlist, since we must be
+ * in a SELECT query node. Instead, use the processed_tlist returned
+ * by plan_set_operations (since this tells whether it returned any
+ * resjunk columns!), and transfer any sort key information from the
+ * original tlist.
+ */
+ Assert(parse->commandType == CMD_SELECT);
+
+ /* for safety, copy processed_tlist instead of modifying in-place */
+ root->processed_tlist =
+ postprocess_setop_tlist(copyObject(root->processed_tlist),
+ parse->targetList);
+
+ /* Also extract the PathTarget form of the setop result tlist */
+ final_target = current_rel->cheapest_total_path->pathtarget;
+
+ /* And check whether it's parallel safe */
+ final_target_parallel_safe =
+ is_parallel_safe(root, (Node *) final_target->exprs);
+
+ /* The setop result tlist couldn't contain any SRFs */
+ Assert(!parse->hasTargetSRFs);
+ final_targets = final_targets_contain_srfs = NIL;
+
+ /*
+ * Can't handle FOR [KEY] UPDATE/SHARE here (parser should have
+ * checked already, but let's make sure).
+ */
+ if (parse->rowMarks)
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ /*------
+ translator: %s is a SQL row locking clause such as FOR UPDATE */
+ errmsg("%s is not allowed with UNION/INTERSECT/EXCEPT",
+ LCS_asString(linitial_node(RowMarkClause,
+ parse->rowMarks)->strength))));
+
+ /*
+ * Calculate pathkeys that represent result ordering requirements
+ */
+ Assert(parse->distinctClause == NIL);
+ root->sort_pathkeys = make_pathkeys_for_sortclauses(root,
+ parse->sortClause,
+ root->processed_tlist);
+ }
+ else
+ {
+ /* No set operations, do regular planning */
+ PathTarget *sort_input_target;
+ List *sort_input_targets;
+ List *sort_input_targets_contain_srfs;
+ bool sort_input_target_parallel_safe;
+ PathTarget *grouping_target;
+ List *grouping_targets;
+ List *grouping_targets_contain_srfs;
+ bool grouping_target_parallel_safe;
+ PathTarget *scanjoin_target;
+ List *scanjoin_targets;
+ List *scanjoin_targets_contain_srfs;
+ bool scanjoin_target_parallel_safe;
+ bool scanjoin_target_same_exprs;
+ bool have_grouping;
+ WindowFuncLists *wflists = NULL;
+ List *activeWindows = NIL;
+ grouping_sets_data *gset_data = NULL;
+ standard_qp_extra qp_extra;
+
+ /* A recursive query should always have setOperations */
+ Assert(!root->hasRecursion);
+
+ /* Preprocess grouping sets and GROUP BY clause, if any */
+ if (parse->groupingSets)
+ {
+ gset_data = preprocess_grouping_sets(root);
+ }
+ else
+ {
+ /* Preprocess regular GROUP BY clause, if any */
+ if (parse->groupClause)
+ parse->groupClause = preprocess_groupclause(root, NIL);
+ }
+
+ /*
+ * Preprocess targetlist. Note that much of the remaining planning
+ * work will be done with the PathTarget representation of tlists, but
+ * we must also maintain the full representation of the final tlist so
+ * that we can transfer its decoration (resnames etc) to the topmost
+ * tlist of the finished Plan. This is kept in processed_tlist.
+ */
+ preprocess_targetlist(root);
+
+ /*
+ * Mark all the aggregates with resolved aggtranstypes, and detect
+ * aggregates that are duplicates or can share transition state. We
+ * must do this before slicing and dicing the tlist into various
+ * pathtargets, else some copies of the Aggref nodes might escape
+ * being marked.
+ */
+ if (parse->hasAggs)
+ {
+ preprocess_aggrefs(root, (Node *) root->processed_tlist);
+ preprocess_aggrefs(root, (Node *) parse->havingQual);
+ }
+
+ /*
+ * Locate any window functions in the tlist. (We don't need to look
+ * anywhere else, since expressions used in ORDER BY will be in there
+ * too.) Note that they could all have been eliminated by constant
+ * folding, in which case we don't need to do any more work.
+ */
+ if (parse->hasWindowFuncs)
+ {
+ wflists = find_window_functions((Node *) root->processed_tlist,
+ list_length(parse->windowClause));
+ if (wflists->numWindowFuncs > 0)
+ activeWindows = select_active_windows(root, wflists);
+ else
+ parse->hasWindowFuncs = false;
+ }
+
+ /*
+ * Preprocess MIN/MAX aggregates, if any. Note: be careful about
+ * adding logic between here and the query_planner() call. Anything
+ * that is needed in MIN/MAX-optimizable cases will have to be
+ * duplicated in planagg.c.
+ */
+ if (parse->hasAggs)
+ preprocess_minmax_aggregates(root);
+
+ /*
+ * Figure out whether there's a hard limit on the number of rows that
+ * query_planner's result subplan needs to return. Even if we know a
+ * hard limit overall, it doesn't apply if the query has any
+ * grouping/aggregation operations, or SRFs in the tlist.
+ */
+ if (parse->groupClause ||
+ parse->groupingSets ||
+ parse->distinctClause ||
+ parse->hasAggs ||
+ parse->hasWindowFuncs ||
+ parse->hasTargetSRFs ||
+ root->hasHavingQual)
+ root->limit_tuples = -1.0;
+ else
+ root->limit_tuples = limit_tuples;
+
+ /* Set up data needed by standard_qp_callback */
+ qp_extra.activeWindows = activeWindows;
+ qp_extra.groupClause = (gset_data
+ ? (gset_data->rollups ? linitial_node(RollupData, gset_data->rollups)->groupClause : NIL)
+ : parse->groupClause);
+
+ /*
+ * Generate the best unsorted and presorted paths for the scan/join
+ * portion of this Query, ie the processing represented by the
+ * FROM/WHERE clauses. (Note there may not be any presorted paths.)
+ * We also generate (in standard_qp_callback) pathkey representations
+ * of the query's sort clause, distinct clause, etc.
+ */
+ current_rel = query_planner(root, standard_qp_callback, &qp_extra);
+
+ /*
+ * Convert the query's result tlist into PathTarget format.
+ *
+ * Note: this cannot be done before query_planner() has performed
+ * appendrel expansion, because that might add resjunk entries to
+ * root->processed_tlist. Waiting till afterwards is also helpful
+ * because the target width estimates can use per-Var width numbers
+ * that were obtained within query_planner().
+ */
+ final_target = create_pathtarget(root, root->processed_tlist);
+ final_target_parallel_safe =
+ is_parallel_safe(root, (Node *) final_target->exprs);
+
+ /*
+ * If ORDER BY was given, consider whether we should use a post-sort
+ * projection, and compute the adjusted target for preceding steps if
+ * so.
+ */
+ if (parse->sortClause)
+ {
+ sort_input_target = make_sort_input_target(root,
+ final_target,
+ &have_postponed_srfs);
+ sort_input_target_parallel_safe =
+ is_parallel_safe(root, (Node *) sort_input_target->exprs);
+ }
+ else
+ {
+ sort_input_target = final_target;
+ sort_input_target_parallel_safe = final_target_parallel_safe;
+ }
+
+ /*
+ * If we have window functions to deal with, the output from any
+ * grouping step needs to be what the window functions want;
+ * otherwise, it should be sort_input_target.
+ */
+ if (activeWindows)
+ {
+ grouping_target = make_window_input_target(root,
+ final_target,
+ activeWindows);
+ grouping_target_parallel_safe =
+ is_parallel_safe(root, (Node *) grouping_target->exprs);
+ }
+ else
+ {
+ grouping_target = sort_input_target;
+ grouping_target_parallel_safe = sort_input_target_parallel_safe;
+ }
+
+ /*
+ * If we have grouping or aggregation to do, the topmost scan/join
+ * plan node must emit what the grouping step wants; otherwise, it
+ * should emit grouping_target.
+ */
+ have_grouping = (parse->groupClause || parse->groupingSets ||
+ parse->hasAggs || root->hasHavingQual);
+ if (have_grouping)
+ {
+ scanjoin_target = make_group_input_target(root, final_target);
+ scanjoin_target_parallel_safe =
+ is_parallel_safe(root, (Node *) scanjoin_target->exprs);
+ }
+ else
+ {
+ scanjoin_target = grouping_target;
+ scanjoin_target_parallel_safe = grouping_target_parallel_safe;
+ }
+
+ /*
+ * If there are any SRFs in the targetlist, we must separate each of
+ * these PathTargets into SRF-computing and SRF-free targets. Replace
+ * each of the named targets with a SRF-free version, and remember the
+ * list of additional projection steps we need to add afterwards.
+ */
+ if (parse->hasTargetSRFs)
+ {
+ /* final_target doesn't recompute any SRFs in sort_input_target */
+ split_pathtarget_at_srfs(root, final_target, sort_input_target,
+ &final_targets,
+ &final_targets_contain_srfs);
+ final_target = linitial_node(PathTarget, final_targets);
+ Assert(!linitial_int(final_targets_contain_srfs));
+ /* likewise for sort_input_target vs. grouping_target */
+ split_pathtarget_at_srfs(root, sort_input_target, grouping_target,
+ &sort_input_targets,
+ &sort_input_targets_contain_srfs);
+ sort_input_target = linitial_node(PathTarget, sort_input_targets);
+ Assert(!linitial_int(sort_input_targets_contain_srfs));
+ /* likewise for grouping_target vs. scanjoin_target */
+ split_pathtarget_at_srfs(root, grouping_target, scanjoin_target,
+ &grouping_targets,
+ &grouping_targets_contain_srfs);
+ grouping_target = linitial_node(PathTarget, grouping_targets);
+ Assert(!linitial_int(grouping_targets_contain_srfs));
+ /* scanjoin_target will not have any SRFs precomputed for it */
+ split_pathtarget_at_srfs(root, scanjoin_target, NULL,
+ &scanjoin_targets,
+ &scanjoin_targets_contain_srfs);
+ scanjoin_target = linitial_node(PathTarget, scanjoin_targets);
+ Assert(!linitial_int(scanjoin_targets_contain_srfs));
+ }
+ else
+ {
+ /* initialize lists; for most of these, dummy values are OK */
+ final_targets = final_targets_contain_srfs = NIL;
+ sort_input_targets = sort_input_targets_contain_srfs = NIL;
+ grouping_targets = grouping_targets_contain_srfs = NIL;
+ scanjoin_targets = list_make1(scanjoin_target);
+ scanjoin_targets_contain_srfs = NIL;
+ }
+
+ /* Apply scan/join target. */
+ scanjoin_target_same_exprs = list_length(scanjoin_targets) == 1
+ && equal(scanjoin_target->exprs, current_rel->reltarget->exprs);
+ apply_scanjoin_target_to_paths(root, current_rel, scanjoin_targets,
+ scanjoin_targets_contain_srfs,
+ scanjoin_target_parallel_safe,
+ scanjoin_target_same_exprs);
+
+ /*
+ * Save the various upper-rel PathTargets we just computed into
+ * root->upper_targets[]. The core code doesn't use this, but it
+ * provides a convenient place for extensions to get at the info. For
+ * consistency, we save all the intermediate targets, even though some
+ * of the corresponding upperrels might not be needed for this query.
+ */
+ root->upper_targets[UPPERREL_FINAL] = final_target;
+ root->upper_targets[UPPERREL_ORDERED] = final_target;
+ root->upper_targets[UPPERREL_PARTIAL_DISTINCT] = sort_input_target;
+ root->upper_targets[UPPERREL_DISTINCT] = sort_input_target;
+ root->upper_targets[UPPERREL_WINDOW] = sort_input_target;
+ root->upper_targets[UPPERREL_GROUP_AGG] = grouping_target;
+
+ /*
+ * If we have grouping and/or aggregation, consider ways to implement
+ * that. We build a new upperrel representing the output of this
+ * phase.
+ */
+ if (have_grouping)
+ {
+ current_rel = create_grouping_paths(root,
+ current_rel,
+ grouping_target,
+ grouping_target_parallel_safe,
+ gset_data);
+ /* Fix things up if grouping_target contains SRFs */
+ if (parse->hasTargetSRFs)
+ adjust_paths_for_srfs(root, current_rel,
+ grouping_targets,
+ grouping_targets_contain_srfs);
+ }
+
+ /*
+ * If we have window functions, consider ways to implement those. We
+ * build a new upperrel representing the output of this phase.
+ */
+ if (activeWindows)
+ {
+ current_rel = create_window_paths(root,
+ current_rel,
+ grouping_target,
+ sort_input_target,
+ sort_input_target_parallel_safe,
+ wflists,
+ activeWindows);
+ /* Fix things up if sort_input_target contains SRFs */
+ if (parse->hasTargetSRFs)
+ adjust_paths_for_srfs(root, current_rel,
+ sort_input_targets,
+ sort_input_targets_contain_srfs);
+ }
+
+ /*
+ * If there is a DISTINCT clause, consider ways to implement that. We
+ * build a new upperrel representing the output of this phase.
+ */
+ if (parse->distinctClause)
+ {
+ current_rel = create_distinct_paths(root,
+ current_rel);
+ }
+ } /* end of if (setOperations) */
+
+ /*
+ * If ORDER BY was given, consider ways to implement that, and generate a
+ * new upperrel containing only paths that emit the correct ordering and
+ * project the correct final_target. We can apply the original
+ * limit_tuples limit in sort costing here, but only if there are no
+ * postponed SRFs.
+ */
+ if (parse->sortClause)
+ {
+ current_rel = create_ordered_paths(root,
+ current_rel,
+ final_target,
+ final_target_parallel_safe,
+ have_postponed_srfs ? -1.0 :
+ limit_tuples);
+ /* Fix things up if final_target contains SRFs */
+ if (parse->hasTargetSRFs)
+ adjust_paths_for_srfs(root, current_rel,
+ final_targets,
+ final_targets_contain_srfs);
+ }
+
+ /*
+ * Now we are prepared to build the final-output upperrel.
+ */
+ final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
+
+ /*
+ * If the input rel is marked consider_parallel and there's nothing that's
+ * not parallel-safe in the LIMIT clause, then the final_rel can be marked
+ * consider_parallel as well. Note that if the query has rowMarks or is
+ * not a SELECT, consider_parallel will be false for every relation in the
+ * query.
+ */
+ if (current_rel->consider_parallel &&
+ is_parallel_safe(root, parse->limitOffset) &&
+ is_parallel_safe(root, parse->limitCount))
+ final_rel->consider_parallel = true;
+
+ /*
+ * If the current_rel belongs to a single FDW, so does the final_rel.
+ */
+ final_rel->serverid = current_rel->serverid;
+ final_rel->userid = current_rel->userid;
+ final_rel->useridiscurrent = current_rel->useridiscurrent;
+ final_rel->fdwroutine = current_rel->fdwroutine;
+
+ /*
+ * Generate paths for the final_rel. Insert all surviving paths, with
+ * LockRows, Limit, and/or ModifyTable steps added if needed.
+ */
+ foreach(lc, current_rel->pathlist)
+ {
+ Path *path = (Path *) lfirst(lc);
+
+ /*
+ * If there is a FOR [KEY] UPDATE/SHARE clause, add the LockRows node.
+ * (Note: we intentionally test parse->rowMarks not root->rowMarks
+ * here. If there are only non-locking rowmarks, they should be
+ * handled by the ModifyTable node instead. However, root->rowMarks
+ * is what goes into the LockRows node.)
+ */
+ if (parse->rowMarks)
+ {
+ path = (Path *) create_lockrows_path(root, final_rel, path,
+ root->rowMarks,
+ assign_special_exec_param(root));
+ }
+
+ /*
+ * If there is a LIMIT/OFFSET clause, add the LIMIT node.
+ */
+ if (limit_needed(parse))
+ {
+ path = (Path *) create_limit_path(root, final_rel, path,
+ parse->limitOffset,
+ parse->limitCount,
+ parse->limitOption,
+ offset_est, count_est);
+ }
+
+ /*
+ * If this is an INSERT/UPDATE/DELETE/MERGE, add the ModifyTable node.
+ */
+ if (parse->commandType != CMD_SELECT)
+ {
+ Index rootRelation;
+ List *resultRelations = NIL;
+ List *updateColnosLists = NIL;
+ List *withCheckOptionLists = NIL;
+ List *returningLists = NIL;
+ List *mergeActionLists = NIL;
+ List *rowMarks;
+
+ if (bms_membership(root->all_result_relids) == BMS_MULTIPLE)
+ {
+ /* Inherited UPDATE/DELETE/MERGE */
+ RelOptInfo *top_result_rel = find_base_rel(root,
+ parse->resultRelation);
+ int resultRelation = -1;
+
+ /* Pass the root result rel forward to the executor. */
+ rootRelation = parse->resultRelation;
+
+ /* Add only leaf children to ModifyTable. */
+ while ((resultRelation = bms_next_member(root->leaf_result_relids,
+ resultRelation)) >= 0)
+ {
+ RelOptInfo *this_result_rel = find_base_rel(root,
+ resultRelation);
+
+ /*
+ * Also exclude any leaf rels that have turned dummy since
+ * being added to the list, for example, by being excluded
+ * by constraint exclusion.
+ */
+ if (IS_DUMMY_REL(this_result_rel))
+ continue;
+
+ /* Build per-target-rel lists needed by ModifyTable */
+ resultRelations = lappend_int(resultRelations,
+ resultRelation);
+ if (parse->commandType == CMD_UPDATE)
+ {
+ List *update_colnos = root->update_colnos;
+
+ if (this_result_rel != top_result_rel)
+ update_colnos =
+ adjust_inherited_attnums_multilevel(root,
+ update_colnos,
+ this_result_rel->relid,
+ top_result_rel->relid);
+ updateColnosLists = lappend(updateColnosLists,
+ update_colnos);
+ }
+ if (parse->withCheckOptions)
+ {
+ List *withCheckOptions = parse->withCheckOptions;
+
+ if (this_result_rel != top_result_rel)
+ withCheckOptions = (List *)
+ adjust_appendrel_attrs_multilevel(root,
+ (Node *) withCheckOptions,
+ this_result_rel->relids,
+ top_result_rel->relids);
+ withCheckOptionLists = lappend(withCheckOptionLists,
+ withCheckOptions);
+ }
+ if (parse->returningList)
+ {
+ List *returningList = parse->returningList;
+
+ if (this_result_rel != top_result_rel)
+ returningList = (List *)
+ adjust_appendrel_attrs_multilevel(root,
+ (Node *) returningList,
+ this_result_rel->relids,
+ top_result_rel->relids);
+ returningLists = lappend(returningLists,
+ returningList);
+ }
+ if (parse->mergeActionList)
+ {
+ ListCell *l;
+ List *mergeActionList = NIL;
+
+ /*
+ * Copy MergeActions and translate stuff that
+ * references attribute numbers.
+ */
+ foreach(l, parse->mergeActionList)
+ {
+ MergeAction *action = lfirst(l),
+ *leaf_action = copyObject(action);
+
+ leaf_action->qual =
+ adjust_appendrel_attrs_multilevel(root,
+ (Node *) action->qual,
+ this_result_rel->relids,
+ top_result_rel->relids);
+ leaf_action->targetList = (List *)
+ adjust_appendrel_attrs_multilevel(root,
+ (Node *) action->targetList,
+ this_result_rel->relids,
+ top_result_rel->relids);
+ if (leaf_action->commandType == CMD_UPDATE)
+ leaf_action->updateColnos =
+ adjust_inherited_attnums_multilevel(root,
+ action->updateColnos,
+ this_result_rel->relid,
+ top_result_rel->relid);
+ mergeActionList = lappend(mergeActionList,
+ leaf_action);
+ }
+
+ mergeActionLists = lappend(mergeActionLists,
+ mergeActionList);
+ }
+ }
+
+ if (resultRelations == NIL)
+ {
+ /*
+ * We managed to exclude every child rel, so generate a
+ * dummy one-relation plan using info for the top target
+ * rel (even though that may not be a leaf target).
+ * Although it's clear that no data will be updated or
+ * deleted, we still need to have a ModifyTable node so
+ * that any statement triggers will be executed. (This
+ * could be cleaner if we fixed nodeModifyTable.c to allow
+ * zero target relations, but that probably wouldn't be a
+ * net win.)
+ */
+ resultRelations = list_make1_int(parse->resultRelation);
+ if (parse->commandType == CMD_UPDATE)
+ updateColnosLists = list_make1(root->update_colnos);
+ if (parse->withCheckOptions)
+ withCheckOptionLists = list_make1(parse->withCheckOptions);
+ if (parse->returningList)
+ returningLists = list_make1(parse->returningList);
+ if (parse->mergeActionList)
+ mergeActionLists = list_make1(parse->mergeActionList);
+ }
+ }
+ else
+ {
+ /* Single-relation INSERT/UPDATE/DELETE/MERGE. */
+ rootRelation = 0; /* there's no separate root rel */
+ resultRelations = list_make1_int(parse->resultRelation);
+ if (parse->commandType == CMD_UPDATE)
+ updateColnosLists = list_make1(root->update_colnos);
+ if (parse->withCheckOptions)
+ withCheckOptionLists = list_make1(parse->withCheckOptions);
+ if (parse->returningList)
+ returningLists = list_make1(parse->returningList);
+ if (parse->mergeActionList)
+ mergeActionLists = list_make1(parse->mergeActionList);
+ }
+
+ /*
+ * If there was a FOR [KEY] UPDATE/SHARE clause, the LockRows node
+ * will have dealt with fetching non-locked marked rows, else we
+ * need to have ModifyTable do that.
+ */
+ if (parse->rowMarks)
+ rowMarks = NIL;
+ else
+ rowMarks = root->rowMarks;
+
+ path = (Path *)
+ create_modifytable_path(root, final_rel,
+ path,
+ parse->commandType,
+ parse->canSetTag,
+ parse->resultRelation,
+ rootRelation,
+ root->partColsUpdated,
+ resultRelations,
+ updateColnosLists,
+ withCheckOptionLists,
+ returningLists,
+ rowMarks,
+ parse->onConflict,
+ mergeActionLists,
+ assign_special_exec_param(root));
+ }
+
+ /* And shove it into final_rel */
+ add_path(final_rel, path);
+ }
+
+ /*
+ * Generate partial paths for final_rel, too, if outer query levels might
+ * be able to make use of them.
+ */
+ if (final_rel->consider_parallel && root->query_level > 1 &&
+ !limit_needed(parse))
+ {
+ Assert(!parse->rowMarks && parse->commandType == CMD_SELECT);
+ foreach(lc, current_rel->partial_pathlist)
+ {
+ Path *partial_path = (Path *) lfirst(lc);
+
+ add_partial_path(final_rel, partial_path);
+ }
+ }
+
+ extra.limit_needed = limit_needed(parse);
+ extra.limit_tuples = limit_tuples;
+ extra.count_est = count_est;
+ extra.offset_est = offset_est;
+
+ /*
+ * If there is an FDW that's responsible for all baserels of the query,
+ * let it consider adding ForeignPaths.
+ */
+ if (final_rel->fdwroutine &&
+ final_rel->fdwroutine->GetForeignUpperPaths)
+ final_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_FINAL,
+ current_rel, final_rel,
+ &extra);
+
+ /* Let extensions possibly add some more paths */
+ if (create_upper_paths_hook)
+ (*create_upper_paths_hook) (root, UPPERREL_FINAL,
+ current_rel, final_rel, &extra);
+
+ /* Note: currently, we leave it to callers to do set_cheapest() */
+}
+
+/*
+ * Do preprocessing for groupingSets clause and related data. This handles the
+ * preliminary steps of expanding the grouping sets, organizing them into lists
+ * of rollups, and preparing annotations which will later be filled in with
+ * size estimates.
+ */
+static grouping_sets_data *
+preprocess_grouping_sets(PlannerInfo *root)
+{
+ Query *parse = root->parse;
+ List *sets;
+ int maxref = 0;
+ ListCell *lc;
+ ListCell *lc_set;
+ grouping_sets_data *gd = palloc0(sizeof(grouping_sets_data));
+
+ parse->groupingSets = expand_grouping_sets(parse->groupingSets, parse->groupDistinct, -1);
+
+ gd->any_hashable = false;
+ gd->unhashable_refs = NULL;
+ gd->unsortable_refs = NULL;
+ gd->unsortable_sets = NIL;
+
+ if (parse->groupClause)
+ {
+ ListCell *lc;
+
+ foreach(lc, parse->groupClause)
+ {
+ SortGroupClause *gc = lfirst_node(SortGroupClause, lc);
+ Index ref = gc->tleSortGroupRef;
+
+ if (ref > maxref)
+ maxref = ref;
+
+ if (!gc->hashable)
+ gd->unhashable_refs = bms_add_member(gd->unhashable_refs, ref);
+
+ if (!OidIsValid(gc->sortop))
+ gd->unsortable_refs = bms_add_member(gd->unsortable_refs, ref);
+ }
+ }
+
+ /* Allocate workspace array for remapping */
+ gd->tleref_to_colnum_map = (int *) palloc((maxref + 1) * sizeof(int));
+
+ /*
+ * If we have any unsortable sets, we must extract them before trying to
+ * prepare rollups. Unsortable sets don't go through
+ * reorder_grouping_sets, so we must apply the GroupingSetData annotation
+ * here.
+ */
+ if (!bms_is_empty(gd->unsortable_refs))
+ {
+ List *sortable_sets = NIL;
+
+ foreach(lc, parse->groupingSets)
+ {
+ List *gset = (List *) lfirst(lc);
+
+ if (bms_overlap_list(gd->unsortable_refs, gset))
+ {
+ GroupingSetData *gs = makeNode(GroupingSetData);
+
+ gs->set = gset;
+ gd->unsortable_sets = lappend(gd->unsortable_sets, gs);
+
+ /*
+ * We must enforce here that an unsortable set is hashable;
+ * later code assumes this. Parse analysis only checks that
+ * every individual column is either hashable or sortable.
+ *
+ * Note that passing this test doesn't guarantee we can
+ * generate a plan; there might be other showstoppers.
+ */
+ if (bms_overlap_list(gd->unhashable_refs, gset))
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("could not implement GROUP BY"),
+ errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
+ }
+ else
+ sortable_sets = lappend(sortable_sets, gset);
+ }
+
+ if (sortable_sets)
+ sets = extract_rollup_sets(sortable_sets);
+ else
+ sets = NIL;
+ }
+ else
+ sets = extract_rollup_sets(parse->groupingSets);
+
+ foreach(lc_set, sets)
+ {
+ List *current_sets = (List *) lfirst(lc_set);
+ RollupData *rollup = makeNode(RollupData);
+ GroupingSetData *gs;
+
+ /*
+ * Reorder the current list of grouping sets into correct prefix
+ * order. If only one aggregation pass is needed, try to make the
+ * list match the ORDER BY clause; if more than one pass is needed, we
+ * don't bother with that.
+ *
+ * Note that this reorders the sets from smallest-member-first to
+ * largest-member-first, and applies the GroupingSetData annotations,
+ * though the data will be filled in later.
+ */
+ current_sets = reorder_grouping_sets(current_sets,
+ (list_length(sets) == 1
+ ? parse->sortClause
+ : NIL));
+
+ /*
+ * Get the initial (and therefore largest) grouping set.
+ */
+ gs = linitial_node(GroupingSetData, current_sets);
+
+ /*
+ * Order the groupClause appropriately. If the first grouping set is
+ * empty, then the groupClause must also be empty; otherwise we have
+ * to force the groupClause to match that grouping set's order.
+ *
+ * (The first grouping set can be empty even though parse->groupClause
+ * is not empty only if all non-empty grouping sets are unsortable.
+ * The groupClauses for hashed grouping sets are built later on.)
+ */
+ if (gs->set)
+ rollup->groupClause = preprocess_groupclause(root, gs->set);
+ else
+ rollup->groupClause = NIL;
+
+ /*
+ * Is it hashable? We pretend empty sets are hashable even though we
+ * actually force them not to be hashed later. But don't bother if
+ * there's nothing but empty sets (since in that case we can't hash
+ * anything).
+ */
+ if (gs->set &&
+ !bms_overlap_list(gd->unhashable_refs, gs->set))
+ {
+ rollup->hashable = true;
+ gd->any_hashable = true;
+ }
+
+ /*
+ * Now that we've pinned down an order for the groupClause for this
+ * list of grouping sets, we need to remap the entries in the grouping
+ * sets from sortgrouprefs to plain indices (0-based) into the
+ * groupClause for this collection of grouping sets. We keep the
+ * original form for later use, though.
+ */
+ rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
+ current_sets,
+ gd->tleref_to_colnum_map);
+ rollup->gsets_data = current_sets;
+
+ gd->rollups = lappend(gd->rollups, rollup);
+ }
+
+ if (gd->unsortable_sets)
+ {
+ /*
+ * We have not yet pinned down a groupclause for this, but we will
+ * need index-based lists for estimation purposes. Construct
+ * hash_sets_idx based on the entire original groupclause for now.
+ */
+ gd->hash_sets_idx = remap_to_groupclause_idx(parse->groupClause,
+ gd->unsortable_sets,
+ gd->tleref_to_colnum_map);
+ gd->any_hashable = true;
+ }
+
+ return gd;
+}
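+
+/*
+ * Illustrative sketch of the above (hypothetical clause, not drawn from any
+ * particular query): given
+ *
+ *		GROUP BY GROUPING SETS ((a, b), (a), ())
+ *
+ * the expanded sets are (), (a) and (a, b), which all fit in one chain
+ * ordered by set inclusion, so gd->rollups ends up holding a single
+ * RollupData whose gsets_data lists the sets largest first: (a, b), (a), ().
+ * Had the clause been GROUPING SETS ((a), (b)), the two sets are
+ * incomparable and two rollups would be built instead.
+ */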
+
+/*
+ * Given a groupclause and a list of GroupingSetData, return equivalent sets
+ * (without annotation) mapped to indexes into the given groupclause.
+ */
+static List *
+remap_to_groupclause_idx(List *groupClause,
+ List *gsets,
+ int *tleref_to_colnum_map)
+{
+ int ref = 0;
+ List *result = NIL;
+ ListCell *lc;
+
+ foreach(lc, groupClause)
+ {
+ SortGroupClause *gc = lfirst_node(SortGroupClause, lc);
+
+ tleref_to_colnum_map[gc->tleSortGroupRef] = ref++;
+ }
+
+ foreach(lc, gsets)
+ {
+ List *set = NIL;
+ ListCell *lc2;
+ GroupingSetData *gs = lfirst_node(GroupingSetData, lc);
+
+ foreach(lc2, gs->set)
+ {
+ set = lappend_int(set, tleref_to_colnum_map[lfirst_int(lc2)]);
+ }
+
+ result = lappend(result, set);
+ }
+
+ return result;
+}
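+
+/*
+ * For example (hypothetical sortgroupref values): if groupClause lists
+ * clauses with tleSortGroupRef 3, 5 and 2 in that order, the map records
+ * 3 -> 0, 5 -> 1 and 2 -> 2, so a grouping set stored as (5, 3) comes back
+ * as the index list (1, 0).
+ */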
+
+
+/*
+ * preprocess_rowmarks - set up PlanRowMarks if needed
+ */
+static void
+preprocess_rowmarks(PlannerInfo *root)
+{
+ Query *parse = root->parse;
+ Bitmapset *rels;
+ List *prowmarks;
+ ListCell *l;
+ int i;
+
+ if (parse->rowMarks)
+ {
+ /*
+ * We've got trouble if FOR [KEY] UPDATE/SHARE appears inside
+ * grouping, since grouping renders a reference to individual tuple
+ * CTIDs invalid. This is also checked at parse time, but that's
+ * insufficient because of rule substitution, query pullup, etc.
+ */
+ CheckSelectLocking(parse, linitial_node(RowMarkClause,
+ parse->rowMarks)->strength);
+ }
+ else
+ {
+ /*
+ * We only need rowmarks for UPDATE, DELETE, MERGE, or FOR [KEY]
+ * UPDATE/SHARE.
+ */
+ if (parse->commandType != CMD_UPDATE &&
+ parse->commandType != CMD_DELETE &&
+ parse->commandType != CMD_MERGE)
+ return;
+ }
+
+ /*
+ * We need to have rowmarks for all base relations except the target. We
+ * make a bitmapset of all base rels and then remove the items we don't
+ * need or have FOR [KEY] UPDATE/SHARE marks for.
+ */
+ rels = get_relids_in_jointree((Node *) parse->jointree, false);
+ if (parse->resultRelation)
+ rels = bms_del_member(rels, parse->resultRelation);
+
+ /*
+ * Convert RowMarkClauses to PlanRowMark representation.
+ */
+ prowmarks = NIL;
+ foreach(l, parse->rowMarks)
+ {
+ RowMarkClause *rc = lfirst_node(RowMarkClause, l);
+ RangeTblEntry *rte = rt_fetch(rc->rti, parse->rtable);
+ PlanRowMark *newrc;
+
+ /*
+ * Currently, it is syntactically impossible to have FOR UPDATE et al
+ * applied to an update/delete target rel. If that ever becomes
+ * possible, we should drop the target from the PlanRowMark list.
+ */
+ Assert(rc->rti != parse->resultRelation);
+
+ /*
+ * Ignore RowMarkClauses for subqueries; they aren't real tables and
+ * can't support true locking. Subqueries that got flattened into the
+ * main query should be ignored completely. Any that didn't will get
+ * ROW_MARK_COPY items in the next loop.
+ */
+ if (rte->rtekind != RTE_RELATION)
+ continue;
+
+ rels = bms_del_member(rels, rc->rti);
+
+ newrc = makeNode(PlanRowMark);
+ newrc->rti = newrc->prti = rc->rti;
+ newrc->rowmarkId = ++(root->glob->lastRowMarkId);
+ newrc->markType = select_rowmark_type(rte, rc->strength);
+ newrc->allMarkTypes = (1 << newrc->markType);
+ newrc->strength = rc->strength;
+ newrc->waitPolicy = rc->waitPolicy;
+ newrc->isParent = false;
+
+ prowmarks = lappend(prowmarks, newrc);
+ }
+
+ /*
+ * Now, add rowmarks for any non-target, non-locked base relations.
+ */
+ i = 0;
+ foreach(l, parse->rtable)
+ {
+ RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
+ PlanRowMark *newrc;
+
+ i++;
+ if (!bms_is_member(i, rels))
+ continue;
+
+ newrc = makeNode(PlanRowMark);
+ newrc->rti = newrc->prti = i;
+ newrc->rowmarkId = ++(root->glob->lastRowMarkId);
+ newrc->markType = select_rowmark_type(rte, LCS_NONE);
+ newrc->allMarkTypes = (1 << newrc->markType);
+ newrc->strength = LCS_NONE;
+ newrc->waitPolicy = LockWaitBlock; /* doesn't matter */
+ newrc->isParent = false;
+
+ prowmarks = lappend(prowmarks, newrc);
+ }
+
+ root->rowMarks = prowmarks;
+}
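+
+/*
+ * For example (hypothetical query over ordinary heap tables): in
+ *
+ *		SELECT ... FROM a JOIN b ON ... FOR UPDATE OF a
+ *
+ * the relation "a" gets a PlanRowMark with markType ROW_MARK_EXCLUSIVE from
+ * the first loop, while "b", being unlocked and not the target, gets
+ * ROW_MARK_REFERENCE from the second loop so that its rows can be
+ * re-fetched at need.
+ */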
+
+/*
+ * Select RowMarkType to use for a given table
+ */
+RowMarkType
+select_rowmark_type(RangeTblEntry *rte, LockClauseStrength strength)
+{
+ if (rte->rtekind != RTE_RELATION)
+ {
+ /* If it's not a table at all, use ROW_MARK_COPY */
+ return ROW_MARK_COPY;
+ }
+ else if (rte->relkind == RELKIND_FOREIGN_TABLE)
+ {
+ /* Let the FDW select the rowmark type, if it wants to */
+ FdwRoutine *fdwroutine = GetFdwRoutineByRelId(rte->relid);
+
+ if (fdwroutine->GetForeignRowMarkType != NULL)
+ return fdwroutine->GetForeignRowMarkType(rte, strength);
+ /* Otherwise, use ROW_MARK_COPY by default */
+ return ROW_MARK_COPY;
+ }
+ else
+ {
+ /* Regular table, apply the appropriate lock type */
+ switch (strength)
+ {
+ case LCS_NONE:
+
+ /*
+ * We don't need a tuple lock, only the ability to re-fetch
+ * the row.
+ */
+ return ROW_MARK_REFERENCE;
+ break;
+ case LCS_FORKEYSHARE:
+ return ROW_MARK_KEYSHARE;
+ break;
+ case LCS_FORSHARE:
+ return ROW_MARK_SHARE;
+ break;
+ case LCS_FORNOKEYUPDATE:
+ return ROW_MARK_NOKEYEXCLUSIVE;
+ break;
+ case LCS_FORUPDATE:
+ return ROW_MARK_EXCLUSIVE;
+ break;
+ }
+ elog(ERROR, "unrecognized LockClauseStrength %d", (int) strength);
+ return ROW_MARK_EXCLUSIVE; /* keep compiler quiet */
+ }
+}
+
+/*
+ * preprocess_limit - do pre-estimation for LIMIT and/or OFFSET clauses
+ *
+ * We try to estimate the values of the LIMIT/OFFSET clauses, and pass the
+ * results back in *count_est and *offset_est. These variables are set to
+ * 0 if the corresponding clause is not present, and -1 if it's present
+ * but we couldn't estimate the value for it. (The "0" convention is OK
+ * for OFFSET but a little bit bogus for LIMIT: effectively we estimate
+ * LIMIT 0 as though it were LIMIT 1. But this is in line with the planner's
+ * usual practice of never estimating less than one row.) These values will
+ * be passed to create_limit_path, which see if you change this code.
+ *
+ * The return value is the suitably adjusted tuple_fraction to use for
+ * planning the query. This adjustment is not overridable, since it reflects
+ * plan actions that grouping_planner() will certainly take, not assumptions
+ * about context.
+ */
+static double
+preprocess_limit(PlannerInfo *root, double tuple_fraction,
+ int64 *offset_est, int64 *count_est)
+{
+ Query *parse = root->parse;
+ Node *est;
+ double limit_fraction;
+
+ /* Should not be called unless LIMIT or OFFSET */
+ Assert(parse->limitCount || parse->limitOffset);
+
+ /*
+ * Try to obtain the clause values. We use estimate_expression_value
+ * primarily because it can sometimes do something useful with Params.
+ */
+ if (parse->limitCount)
+ {
+ est = estimate_expression_value(root, parse->limitCount);
+ if (est && IsA(est, Const))
+ {
+ if (((Const *) est)->constisnull)
+ {
+ /* NULL indicates LIMIT ALL, ie, no limit */
+ *count_est = 0; /* treat as not present */
+ }
+ else
+ {
+ *count_est = DatumGetInt64(((Const *) est)->constvalue);
+ if (*count_est <= 0)
+ *count_est = 1; /* force to at least 1 */
+ }
+ }
+ else
+ *count_est = -1; /* can't estimate */
+ }
+ else
+ *count_est = 0; /* not present */
+
+ if (parse->limitOffset)
+ {
+ est = estimate_expression_value(root, parse->limitOffset);
+ if (est && IsA(est, Const))
+ {
+ if (((Const *) est)->constisnull)
+ {
+ /* Treat NULL as no offset; the executor will too */
+ *offset_est = 0; /* treat as not present */
+ }
+ else
+ {
+ *offset_est = DatumGetInt64(((Const *) est)->constvalue);
+ if (*offset_est < 0)
+ *offset_est = 0; /* treat as not present */
+ }
+ }
+ else
+ *offset_est = -1; /* can't estimate */
+ }
+ else
+ *offset_est = 0; /* not present */
+
+ if (*count_est != 0)
+ {
+ /*
+ * A LIMIT clause limits the absolute number of tuples returned.
+ * However, if it's not a constant LIMIT then we have to guess; for
+ * lack of a better idea, assume 10% of the plan's result is wanted.
+ */
+ if (*count_est < 0 || *offset_est < 0)
+ {
+ /* LIMIT or OFFSET is an expression ... punt ... */
+ limit_fraction = 0.10;
+ }
+ else
+ {
+ /* LIMIT (plus OFFSET, if any) is max number of tuples needed */
+ limit_fraction = (double) *count_est + (double) *offset_est;
+ }
+
+ /*
+ * If we have absolute limits from both caller and LIMIT, use the
+ * smaller value; likewise if they are both fractional. If one is
+ * fractional and the other absolute, we can't easily determine which
+ * is smaller, but we use the heuristic that the absolute will usually
+ * be smaller.
+ */
+ if (tuple_fraction >= 1.0)
+ {
+ if (limit_fraction >= 1.0)
+ {
+ /* both absolute */
+ tuple_fraction = Min(tuple_fraction, limit_fraction);
+ }
+ else
+ {
+ /* caller absolute, limit fractional; use caller's value */
+ }
+ }
+ else if (tuple_fraction > 0.0)
+ {
+ if (limit_fraction >= 1.0)
+ {
+ /* caller fractional, limit absolute; use limit */
+ tuple_fraction = limit_fraction;
+ }
+ else
+ {
+ /* both fractional */
+ tuple_fraction = Min(tuple_fraction, limit_fraction);
+ }
+ }
+ else
+ {
+ /* no info from caller, just use limit */
+ tuple_fraction = limit_fraction;
+ }
+ }
+ else if (*offset_est != 0 && tuple_fraction > 0.0)
+ {
+ /*
+ * We have an OFFSET but no LIMIT. This acts entirely differently
+ * from the LIMIT case: here, we need to increase rather than decrease
+ * the caller's tuple_fraction, because the OFFSET acts to cause more
+ * tuples to be fetched instead of fewer. This only matters if we got
+ * a tuple_fraction > 0, however.
+ *
+ * As above, use 10% if OFFSET is present but unestimatable.
+ */
+ if (*offset_est < 0)
+ limit_fraction = 0.10;
+ else
+ limit_fraction = (double) *offset_est;
+
+ /*
+ * If we have absolute counts from both caller and OFFSET, add them
+ * together; likewise if they are both fractional. If one is
+ * fractional and the other absolute, we want to take the larger, and
+ * we heuristically assume that's the fractional one.
+ */
+ if (tuple_fraction >= 1.0)
+ {
+ if (limit_fraction >= 1.0)
+ {
+ /* both absolute, so add them together */
+ tuple_fraction += limit_fraction;
+ }
+ else
+ {
+ /* caller absolute, limit fractional; use limit */
+ tuple_fraction = limit_fraction;
+ }
+ }
+ else
+ {
+ if (limit_fraction >= 1.0)
+ {
+ /* caller fractional, limit absolute; use caller's value */
+ }
+ else
+ {
+ /* both fractional, so add them together */
+ tuple_fraction += limit_fraction;
+ if (tuple_fraction >= 1.0)
+ tuple_fraction = 0.0; /* assume fetch all */
+ }
+ }
+ }
+
+ return tuple_fraction;
+}
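+
+/*
+ * A worked example of the interaction above (hypothetical numbers): if the
+ * caller passed tuple_fraction = 0.10 and the query says LIMIT 50 OFFSET 10
+ * with constant values, then count_est = 50, offset_est = 10 and
+ * limit_fraction = 60; one value is fractional and the other absolute, so
+ * the absolute 60 is taken and returned.  With no LIMIT but the same OFFSET
+ * and caller fraction, limit_fraction = 10 and the fractional 0.10 is kept
+ * unchanged, on the heuristic that a fractional estimate is likely the
+ * larger of the two.
+ */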
+
+/*
+ * limit_needed - do we actually need a Limit plan node?
+ *
+ * If we have constant-zero OFFSET and constant-null LIMIT, we can skip adding
+ * a Limit node. This is worth checking for because "OFFSET 0" is a common
+ * locution for an optimization fence. (Because other places in the planner
+ * merely check whether parse->limitOffset isn't NULL, it will still work as
+ * an optimization fence --- we're just suppressing unnecessary run-time
+ * overhead.)
+ *
+ * This might look like it could be merged into preprocess_limit, but there's
+ * a key distinction: here we need hard constants in OFFSET/LIMIT, whereas
+ * in preprocess_limit it's good enough to consider estimated values.
+ */
+bool
+limit_needed(Query *parse)
+{
+ Node *node;
+
+ node = parse->limitCount;
+ if (node)
+ {
+ if (IsA(node, Const))
+ {
+ /* NULL indicates LIMIT ALL, ie, no limit */
+ if (!((Const *) node)->constisnull)
+ return true; /* LIMIT with a constant value */
+ }
+ else
+ return true; /* non-constant LIMIT */
+ }
+
+ node = parse->limitOffset;
+ if (node)
+ {
+ if (IsA(node, Const))
+ {
+ /* Treat NULL as no offset; the executor would too */
+ if (!((Const *) node)->constisnull)
+ {
+ int64 offset = DatumGetInt64(((Const *) node)->constvalue);
+
+ if (offset != 0)
+ return true; /* OFFSET with a nonzero value */
+ }
+ }
+ else
+ return true; /* non-constant OFFSET */
+ }
+
+ return false; /* don't need a Limit plan node */
+}
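+
+/*
+ * For instance, "... OFFSET 0" and "... LIMIT ALL" (a NULL constant) both
+ * return false here, so no Limit node is added, although OFFSET 0 still
+ * acts as an optimization fence elsewhere in the planner.  A non-constant
+ * clause such as "LIMIT $1" always returns true, since its value is not
+ * known at plan time.
+ */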
+
+
+/*
+ * remove_useless_groupby_columns
+ * Remove any columns in the GROUP BY clause that are redundant due to
+ * being functionally dependent on other GROUP BY columns.
+ *
+ * Since some other DBMSes do not allow references to ungrouped columns, it's
+ * not unusual to find all columns listed in GROUP BY even though listing the
+ * primary-key columns would be sufficient. Deleting such excess columns
+ * avoids redundant sorting work, so it's worth doing.
+ *
+ * Relcache invalidations will ensure that cached plans become invalidated
+ * when the underlying index of the pkey constraint is dropped.
+ *
+ * Currently, we only make use of pkey constraints for this, however, we may
+ * wish to take this further in the future and also use unique constraints
+ * which have NOT NULL columns. In that case, plan invalidation will still
+ * work since relations will receive a relcache invalidation when a NOT NULL
+ * constraint is dropped.
+ */
+static void
+remove_useless_groupby_columns(PlannerInfo *root)
+{
+ Query *parse = root->parse;
+ Bitmapset **groupbyattnos;
+ Bitmapset **surplusvars;
+ ListCell *lc;
+ int relid;
+
+ /* No chance to do anything if there are less than two GROUP BY items */
+ if (list_length(parse->groupClause) < 2)
+ return;
+
+ /* Don't fiddle with the GROUP BY clause if the query has grouping sets */
+ if (parse->groupingSets)
+ return;
+
+ /*
+ * Scan the GROUP BY clause to find GROUP BY items that are simple Vars.
+ * Fill groupbyattnos[k] with a bitmapset of the column attnos of RTE k
+ * that are GROUP BY items.
+ */
+ groupbyattnos = (Bitmapset **) palloc0(sizeof(Bitmapset *) *
+ (list_length(parse->rtable) + 1));
+ foreach(lc, parse->groupClause)
+ {
+ SortGroupClause *sgc = lfirst_node(SortGroupClause, lc);
+ TargetEntry *tle = get_sortgroupclause_tle(sgc, parse->targetList);
+ Var *var = (Var *) tle->expr;
+
+ /*
+ * Ignore non-Vars and Vars from other query levels.
+ *
+ * XXX in principle, stable expressions containing Vars could also be
+ * removed, if all the Vars are functionally dependent on other GROUP
+ * BY items. But it's not clear that such cases occur often enough to
+ * be worth troubling over.
+ */
+ if (!IsA(var, Var) ||
+ var->varlevelsup > 0)
+ continue;
+
+ /* OK, remember we have this Var */
+ relid = var->varno;
+ Assert(relid <= list_length(parse->rtable));
+ groupbyattnos[relid] = bms_add_member(groupbyattnos[relid],
+ var->varattno - FirstLowInvalidHeapAttributeNumber);
+ }
+
+ /*
+ * Consider each relation and see if it is possible to remove some of its
+ * Vars from GROUP BY. For simplicity and speed, we do the actual removal
+ * in a separate pass. Here, we just fill surplusvars[k] with a bitmapset
+ * of the column attnos of RTE k that are removable GROUP BY items.
+ */
+ surplusvars = NULL; /* don't allocate array unless required */
+ relid = 0;
+ foreach(lc, parse->rtable)
+ {
+ RangeTblEntry *rte = lfirst_node(RangeTblEntry, lc);
+ Bitmapset *relattnos;
+ Bitmapset *pkattnos;
+ Oid constraintOid;
+
+ relid++;
+
+ /* Only plain relations could have primary-key constraints */
+ if (rte->rtekind != RTE_RELATION)
+ continue;
+
+ /*
+ * We must skip inheritance parent tables as some of the child rels
+ * may cause duplicate rows. This cannot happen with partitioned
+ * tables, however.
+ */
+ if (rte->inh && rte->relkind != RELKIND_PARTITIONED_TABLE)
+ continue;
+
+ /* Nothing to do unless this rel has multiple Vars in GROUP BY */
+ relattnos = groupbyattnos[relid];
+ if (bms_membership(relattnos) != BMS_MULTIPLE)
+ continue;
+
+ /*
+ * Can't remove any columns for this rel if there is no suitable
+ * (i.e., nondeferrable) primary key constraint.
+ */
+ pkattnos = get_primary_key_attnos(rte->relid, false, &constraintOid);
+ if (pkattnos == NULL)
+ continue;
+
+ /*
+ * If the primary key is a proper subset of relattnos then we have
+ * some items in the GROUP BY that can be removed.
+ */
+ if (bms_subset_compare(pkattnos, relattnos) == BMS_SUBSET1)
+ {
+ /*
+ * To easily remember whether we've found anything to do, we don't
+ * allocate the surplusvars[] array until we find something.
+ */
+ if (surplusvars == NULL)
+ surplusvars = (Bitmapset **) palloc0(sizeof(Bitmapset *) *
+ (list_length(parse->rtable) + 1));
+
+ /* Remember the attnos of the removable columns */
+ surplusvars[relid] = bms_difference(relattnos, pkattnos);
+ }
+ }
+
+ /*
+ * If we found any surplus Vars, build a new GROUP BY clause without them.
+ * (Note: this may leave some TLEs with unreferenced ressortgroupref
+ * markings, but that's harmless.)
+ */
+ if (surplusvars != NULL)
+ {
+ List *new_groupby = NIL;
+
+ foreach(lc, parse->groupClause)
+ {
+ SortGroupClause *sgc = lfirst_node(SortGroupClause, lc);
+ TargetEntry *tle = get_sortgroupclause_tle(sgc, parse->targetList);
+ Var *var = (Var *) tle->expr;
+
+ /*
+ * New list must include non-Vars, outer Vars, and anything not
+ * marked as surplus.
+ */
+ if (!IsA(var, Var) ||
+ var->varlevelsup > 0 ||
+ !bms_is_member(var->varattno - FirstLowInvalidHeapAttributeNumber,
+ surplusvars[var->varno]))
+ new_groupby = lappend(new_groupby, sgc);
+ }
+
+ parse->groupClause = new_groupby;
+ }
+}
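+
+/*
+ * A minimal illustration (hypothetical table): if "t" has PRIMARY KEY (id),
+ * then in
+ *
+ *		SELECT id, name, count(*) FROM t GROUP BY id, name;
+ *
+ * the "name" column is functionally dependent on "id", so it is removed
+ * from the GROUP BY list and grouping is done on "id" alone.
+ */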
+
+/*
+ * preprocess_groupclause - do preparatory work on GROUP BY clause
+ *
+ * The idea here is to adjust the ordering of the GROUP BY elements
+ * (which in itself is semantically insignificant) to match ORDER BY,
+ * thereby allowing a single sort operation to both implement the ORDER BY
+ * requirement and set up for a Unique step that implements GROUP BY.
+ *
+ * In principle it might be interesting to consider other orderings of the
+ * GROUP BY elements, which could match the sort ordering of other
+ * possible plans (eg an indexscan) and thereby reduce cost. We don't
+ * bother with that, though. Hashed grouping will frequently win anyway.
+ *
+ * Note: we need no comparable processing of the distinctClause because
+ * the parser already enforced that that matches ORDER BY.
+ *
+ * For grouping sets, the order of items is instead forced to agree with that
+ * of the grouping set (and items not in the grouping set are skipped). The
+ * work of sorting the order of grouping set elements to match the ORDER BY if
+ * possible is done elsewhere.
+ */
+static List *
+preprocess_groupclause(PlannerInfo *root, List *force)
+{
+ Query *parse = root->parse;
+ List *new_groupclause = NIL;
+ bool partial_match;
+ ListCell *sl;
+ ListCell *gl;
+
+ /* For grouping sets, we need to force the ordering */
+ if (force)
+ {
+ foreach(sl, force)
+ {
+ Index ref = lfirst_int(sl);
+ SortGroupClause *cl = get_sortgroupref_clause(ref, parse->groupClause);
+
+ new_groupclause = lappend(new_groupclause, cl);
+ }
+
+ return new_groupclause;
+ }
+
+ /* If no ORDER BY, nothing useful to do here */
+ if (parse->sortClause == NIL)
+ return parse->groupClause;
+
+ /*
+ * Scan the ORDER BY clause and construct a list of matching GROUP BY
+ * items, but only as far as we can make a matching prefix.
+ *
+ * This code assumes that the sortClause contains no duplicate items.
+ */
+ foreach(sl, parse->sortClause)
+ {
+ SortGroupClause *sc = lfirst_node(SortGroupClause, sl);
+
+ foreach(gl, parse->groupClause)
+ {
+ SortGroupClause *gc = lfirst_node(SortGroupClause, gl);
+
+ if (equal(gc, sc))
+ {
+ new_groupclause = lappend(new_groupclause, gc);
+ break;
+ }
+ }
+ if (gl == NULL)
+ break; /* no match, so stop scanning */
+ }
+
+ /* Did we match all of the ORDER BY list, or just some of it? */
+ partial_match = (sl != NULL);
+
+ /* If no match at all, no point in reordering GROUP BY */
+ if (new_groupclause == NIL)
+ return parse->groupClause;
+
+ /*
+ * Add any remaining GROUP BY items to the new list, but only if we were
+ * able to make a complete match. In other words, we only rearrange the
+ * GROUP BY list if the result is that one list is a prefix of the other
+ * --- otherwise there's no possibility of a common sort. Also, give up
+ * if there are any non-sortable GROUP BY items, since then there's no
+ * hope anyway.
+ */
+ foreach(gl, parse->groupClause)
+ {
+ SortGroupClause *gc = lfirst_node(SortGroupClause, gl);
+
+ if (list_member_ptr(new_groupclause, gc))
+ continue; /* it matched an ORDER BY item */
+ if (partial_match)
+ return parse->groupClause; /* give up, no common sort possible */
+ if (!OidIsValid(gc->sortop))
+ return parse->groupClause; /* give up, GROUP BY can't be sorted */
+ new_groupclause = lappend(new_groupclause, gc);
+ }
+
+ /* Success --- install the rearranged GROUP BY list */
+ Assert(list_length(parse->groupClause) == list_length(new_groupclause));
+ return new_groupclause;
+}
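+
+/*
+ * As an illustration (hypothetical query): for
+ *
+ *		SELECT ... GROUP BY b, a ORDER BY a, b
+ *
+ * the GROUP BY list is rearranged to (a, b), letting one sort serve both
+ * clauses.  For "GROUP BY a, c ORDER BY a, b" only a partial prefix can be
+ * matched, so the original GROUP BY order is left alone.
+ */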
+
+/*
+ * Extract lists of grouping sets that can be implemented using a single
+ * rollup-type aggregate pass each. Returns a list of lists of grouping sets.
+ *
+ * Input must be sorted with smallest sets first. Result has each sublist
+ * sorted with smallest sets first.
+ *
+ * We want to produce the absolute minimum possible number of lists here to
+ * avoid excess sorts. Fortunately, there is an algorithm for this; the problem
+ * of finding the minimal partition of a partially-ordered set into chains
+ * (which is what we need, taking the list of grouping sets as a poset ordered
+ * by set inclusion) can be mapped to the problem of finding the maximum
+ * cardinality matching on a bipartite graph, which is solvable in polynomial
+ * time with a worst case of no worse than O(n^2.5) and usually much
+ * better. Since our N is at most 4096, we don't need to consider fallbacks to
+ * heuristic or approximate methods. (Planning time for a 12-d cube is under
+ * half a second on my modest system even with optimization off and assertions
+ * on.)
+ */
+static List *
+extract_rollup_sets(List *groupingSets)
+{
+ int num_sets_raw = list_length(groupingSets);
+ int num_empty = 0;
+ int num_sets = 0; /* distinct sets */
+ int num_chains = 0;
+ List *result = NIL;
+ List **results;
+ List **orig_sets;
+ Bitmapset **set_masks;
+ int *chains;
+ short **adjacency;
+ short *adjacency_buf;
+ BipartiteMatchState *state;
+ int i;
+ int j;
+ int j_size;
+ ListCell *lc1 = list_head(groupingSets);
+ ListCell *lc;
+
+ /*
+ * Start by stripping out empty sets. The algorithm doesn't require this,
+ * but the planner currently needs all empty sets to be returned in the
+ * first list, so we strip them here and add them back after.
+ */
+ while (lc1 && lfirst(lc1) == NIL)
+ {
+ ++num_empty;
+ lc1 = lnext(groupingSets, lc1);
+ }
+
+ /* bail out now if it turns out that all we had were empty sets. */
+ if (!lc1)
+ return list_make1(groupingSets);
+
+ /*----------
+ * We don't strictly need to remove duplicate sets here, but if we don't,
+ * they tend to become scattered through the result, which is a bit
+ * confusing (and irritating if we ever decide to optimize them out).
+ * So we remove them here and add them back after.
+ *
+ * For each non-duplicate set, we fill in the following:
+ *
+ * orig_sets[i] = list of the original set lists
+ * set_masks[i] = bitmapset for testing inclusion
+ * adjacency[i] = array [n, v1, v2, ... vn] of adjacency indices
+ *
+ * chains[i] will be the result group this set is assigned to.
+ *
+ * We index all of these from 1 rather than 0 because it is convenient
+ * to leave 0 free for the NIL node in the graph algorithm.
+ *----------
+ */
+ orig_sets = palloc0((num_sets_raw + 1) * sizeof(List *));
+ set_masks = palloc0((num_sets_raw + 1) * sizeof(Bitmapset *));
+ adjacency = palloc0((num_sets_raw + 1) * sizeof(short *));
+ adjacency_buf = palloc((num_sets_raw + 1) * sizeof(short));
+
+ j_size = 0;
+ j = 0;
+ i = 1;
+
+ for_each_cell(lc, groupingSets, lc1)
+ {
+ List *candidate = (List *) lfirst(lc);
+ Bitmapset *candidate_set = NULL;
+ ListCell *lc2;
+ int dup_of = 0;
+
+ foreach(lc2, candidate)
+ {
+ candidate_set = bms_add_member(candidate_set, lfirst_int(lc2));
+ }
+
+ /* we can only be a dup if we're the same length as a previous set */
+ if (j_size == list_length(candidate))
+ {
+ int k;
+
+ for (k = j; k < i; ++k)
+ {
+ if (bms_equal(set_masks[k], candidate_set))
+ {
+ dup_of = k;
+ break;
+ }
+ }
+ }
+ else if (j_size < list_length(candidate))
+ {
+ j_size = list_length(candidate);
+ j = i;
+ }
+
+ if (dup_of > 0)
+ {
+ orig_sets[dup_of] = lappend(orig_sets[dup_of], candidate);
+ bms_free(candidate_set);
+ }
+ else
+ {
+ int k;
+ int n_adj = 0;
+
+ orig_sets[i] = list_make1(candidate);
+ set_masks[i] = candidate_set;
+
+ /* fill in adjacency list; no need to compare equal-size sets */
+
+ for (k = j - 1; k > 0; --k)
+ {
+ if (bms_is_subset(set_masks[k], candidate_set))
+ adjacency_buf[++n_adj] = k;
+ }
+
+ if (n_adj > 0)
+ {
+ adjacency_buf[0] = n_adj;
+ adjacency[i] = palloc((n_adj + 1) * sizeof(short));
+ memcpy(adjacency[i], adjacency_buf, (n_adj + 1) * sizeof(short));
+ }
+ else
+ adjacency[i] = NULL;
+
+ ++i;
+ }
+ }
+
+ num_sets = i - 1;
+
+ /*
+ * Apply the graph matching algorithm to do the work.
+ */
+ state = BipartiteMatch(num_sets, num_sets, adjacency);
+
+ /*
+ * Now, the state->pair* fields have the info we need to assign sets to
+ * chains. Two sets (u,v) belong to the same chain if pair_uv[u] = v or
+ * pair_vu[v] = u (both will be true, but we check both so that we can do
+ * it in one pass)
+ */
+ chains = palloc0((num_sets + 1) * sizeof(int));
+
+ for (i = 1; i <= num_sets; ++i)
+ {
+ int u = state->pair_vu[i];
+ int v = state->pair_uv[i];
+
+ if (u > 0 && u < i)
+ chains[i] = chains[u];
+ else if (v > 0 && v < i)
+ chains[i] = chains[v];
+ else
+ chains[i] = ++num_chains;
+ }
+
+ /* build result lists. */
+ results = palloc0((num_chains + 1) * sizeof(List *));
+
+ for (i = 1; i <= num_sets; ++i)
+ {
+ int c = chains[i];
+
+ Assert(c > 0);
+
+ results[c] = list_concat(results[c], orig_sets[i]);
+ }
+
+ /* push any empty sets back on the first list. */
+ while (num_empty-- > 0)
+ results[1] = lcons(NIL, results[1]);
+
+ /* make result list */
+ for (i = 1; i <= num_chains; ++i)
+ result = lappend(result, results[i]);
+
+ /*
+ * Free all the things.
+ *
+ * (This is over-fussy for small sets but for large sets we could have
+ * tied up a nontrivial amount of memory.)
+ */
+ BipartiteMatchFree(state);
+ pfree(results);
+ pfree(chains);
+ for (i = 1; i <= num_sets; ++i)
+ if (adjacency[i])
+ pfree(adjacency[i]);
+ pfree(adjacency);
+ pfree(adjacency_buf);
+ pfree(orig_sets);
+ for (i = 1; i <= num_sets; ++i)
+ bms_free(set_masks[i]);
+ pfree(set_masks);
+
+ return result;
+}
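+
+/*
+ * For example (hypothetical sets): the four sets of CUBE(a, b), namely
+ * (), (a), (b) and (a, b), cannot all be placed in one chain because (a)
+ * and (b) are incomparable; the matching therefore yields two chains,
+ * [(), (a), (a, b)] and [(b)], so two aggregation passes suffice.
+ */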
+
+/*
+ * Reorder the elements of a list of grouping sets such that they have correct
+ * prefix relationships. Also inserts the GroupingSetData annotations.
+ *
+ * The input must be ordered with smallest sets first; the result is returned
+ * with largest sets first. Note that the result shares no list substructure
+ * with the input, so it's safe for the caller to modify it later.
+ *
+ * If we're passed in a sortclause, we follow its order of columns to the
+ * extent possible, to minimize the chance that we add unnecessary sorts.
+ * (We're trying here to ensure that GROUPING SETS ((a,b,c),(c)) ORDER BY c,b,a
+ * gets implemented in one pass.)
+ */
+static List *
+reorder_grouping_sets(List *groupingsets, List *sortclause)
+{
+ ListCell *lc;
+ List *previous = NIL;
+ List *result = NIL;
+
+ foreach(lc, groupingsets)
+ {
+ List *candidate = (List *) lfirst(lc);
+ List *new_elems = list_difference_int(candidate, previous);
+ GroupingSetData *gs = makeNode(GroupingSetData);
+
+ while (list_length(sortclause) > list_length(previous) &&
+ list_length(new_elems) > 0)
+ {
+ SortGroupClause *sc = list_nth(sortclause, list_length(previous));
+ int ref = sc->tleSortGroupRef;
+
+ if (list_member_int(new_elems, ref))
+ {
+ previous = lappend_int(previous, ref);
+ new_elems = list_delete_int(new_elems, ref);
+ }
+ else
+ {
+ /* diverged from the sortclause; give up on it */
+ sortclause = NIL;
+ break;
+ }
+ }
+
+ previous = list_concat(previous, new_elems);
+
+ gs->set = list_copy(previous);
+ result = lcons(gs, result);
+ }
+
+ list_free(previous);
+
+ return result;
+}
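+
+/*
+ * Tracing the example in the header comment: the input [(c), (a, b, c)]
+ * with sort clause (c, b, a) first emits the set (c); the second set then
+ * picks up b and a in sort-clause order, giving (c, b, a).  The result is
+ * [(c, b, a), (c)], largest set first, so a single sort on (c, b, a)
+ * satisfies both the rollup and the ORDER BY.
+ */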
+
+/*
+ * Compute query_pathkeys and other pathkeys during plan generation
+ */
+static void
+standard_qp_callback(PlannerInfo *root, void *extra)
+{
+ Query *parse = root->parse;
+ standard_qp_extra *qp_extra = (standard_qp_extra *) extra;
+ List *tlist = root->processed_tlist;
+ List *activeWindows = qp_extra->activeWindows;
+
+ /*
+ * Calculate pathkeys that represent grouping/ordering requirements. The
+ * sortClause is certainly sort-able, but GROUP BY and DISTINCT might not
+ * be, in which case we just leave their pathkeys empty.
+ */
+ if (qp_extra->groupClause &&
+ grouping_is_sortable(qp_extra->groupClause))
+ root->group_pathkeys =
+ make_pathkeys_for_sortclauses(root,
+ qp_extra->groupClause,
+ tlist);
+ else
+ root->group_pathkeys = NIL;
+
+ /* We consider only the first (bottom) window in pathkeys logic */
+ if (activeWindows != NIL)
+ {
+ WindowClause *wc = linitial_node(WindowClause, activeWindows);
+
+ root->window_pathkeys = make_pathkeys_for_window(root,
+ wc,
+ tlist);
+ }
+ else
+ root->window_pathkeys = NIL;
+
+ if (parse->distinctClause &&
+ grouping_is_sortable(parse->distinctClause))
+ root->distinct_pathkeys =
+ make_pathkeys_for_sortclauses(root,
+ parse->distinctClause,
+ tlist);
+ else
+ root->distinct_pathkeys = NIL;
+
+ root->sort_pathkeys =
+ make_pathkeys_for_sortclauses(root,
+ parse->sortClause,
+ tlist);
+
+ /*
+ * Figure out whether we want a sorted result from query_planner.
+ *
+ * If we have a sortable GROUP BY clause, then we want a result sorted
+ * properly for grouping. Otherwise, if we have window functions to
+ * evaluate, we try to sort for the first window. Otherwise, if there's a
+ * sortable DISTINCT clause that's more rigorous than the ORDER BY clause,
+ * we try to produce output that's sufficiently well sorted for the
+ * DISTINCT. Otherwise, if there is an ORDER BY clause, we want to sort
+ * by the ORDER BY clause.
+ *
+ * Note: if we have both ORDER BY and GROUP BY, and ORDER BY is a superset
+ * of GROUP BY, it would be tempting to request sort by ORDER BY --- but
+ * that might just leave us failing to exploit an available sort order at
+ * all. Needs more thought. The choice for DISTINCT versus ORDER BY is
+ * much easier, since we know that the parser ensured that one is a
+ * superset of the other.
+ */
+ if (root->group_pathkeys)
+ root->query_pathkeys = root->group_pathkeys;
+ else if (root->window_pathkeys)
+ root->query_pathkeys = root->window_pathkeys;
+ else if (list_length(root->distinct_pathkeys) >
+ list_length(root->sort_pathkeys))
+ root->query_pathkeys = root->distinct_pathkeys;
+ else if (root->sort_pathkeys)
+ root->query_pathkeys = root->sort_pathkeys;
+ else
+ root->query_pathkeys = NIL;
+}
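+
+/*
+ * For example (hypothetical query): in "SELECT DISTINCT a, b ... ORDER BY a"
+ * the distinct_pathkeys (a, b) are longer than the sort_pathkeys (a), so
+ * query_pathkeys is set to the DISTINCT keys; output sorted by (a, b) also
+ * satisfies the ORDER BY, since the parser guarantees that one clause is a
+ * superset of the other.
+ */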
+
+/*
+ * Estimate number of groups produced by grouping clauses (1 if not grouping)
+ *
+ * path_rows: number of output rows from scan/join step
+ * gd: grouping sets data including list of grouping sets and their clauses
+ * target_list: target list containing group clause references
+ *
+ * If doing grouping sets, we also annotate the gsets data with the estimates
+ * for each set and each individual rollup list, with a view to later
+ * determining whether some combination of them could be hashed instead.
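+ *
+ * For example, for GROUP BY GROUPING SETS ((a), (b)) the estimate is the
+ * predicted number of distinct values of a plus that of b, since each
+ * grouping set contributes its own set of output groups.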
+ */
+static double
+get_number_of_groups(PlannerInfo *root,
+ double path_rows,
+ grouping_sets_data *gd,
+ List *target_list)
+{
+ Query *parse = root->parse;
+ double dNumGroups;
+
+ if (parse->groupClause)
+ {
+ List *groupExprs;
+
+ if (parse->groupingSets)
+ {
+ /* Add up the estimates for each grouping set */
+ ListCell *lc;
+ ListCell *lc2;
+
+ Assert(gd); /* keep Coverity happy */
+
+ dNumGroups = 0;
+
+ foreach(lc, gd->rollups)
+ {
+ RollupData *rollup = lfirst_node(RollupData, lc);
+ ListCell *lc;
+
+ groupExprs = get_sortgrouplist_exprs(rollup->groupClause,
+ target_list);
+
+ rollup->numGroups = 0.0;
+
+ forboth(lc, rollup->gsets, lc2, rollup->gsets_data)
+ {
+ List *gset = (List *) lfirst(lc);
+ GroupingSetData *gs = lfirst_node(GroupingSetData, lc2);
+ double numGroups = estimate_num_groups(root,
+ groupExprs,
+ path_rows,
+ &gset,
+ NULL);
+
+ gs->numGroups = numGroups;
+ rollup->numGroups += numGroups;
+ }
+
+ dNumGroups += rollup->numGroups;
+ }
+
+ if (gd->hash_sets_idx)
+ {
+ ListCell *lc;
+
+ gd->dNumHashGroups = 0;
+
+ groupExprs = get_sortgrouplist_exprs(parse->groupClause,
+ target_list);
+
+ forboth(lc, gd->hash_sets_idx, lc2, gd->unsortable_sets)
+ {
+ List *gset = (List *) lfirst(lc);
+ GroupingSetData *gs = lfirst_node(GroupingSetData, lc2);
+ double numGroups = estimate_num_groups(root,
+ groupExprs,
+ path_rows,
+ &gset,
+ NULL);
+
+ gs->numGroups = numGroups;
+ gd->dNumHashGroups += numGroups;
+ }
+
+ dNumGroups += gd->dNumHashGroups;
+ }
+ }
+ else
+ {
+ /* Plain GROUP BY */
+ groupExprs = get_sortgrouplist_exprs(parse->groupClause,
+ target_list);
+
+ dNumGroups = estimate_num_groups(root, groupExprs, path_rows,
+ NULL, NULL);
+ }
+ }
+ else if (parse->groupingSets)
+ {
+ /* Empty grouping sets ... one result row for each one */
+ dNumGroups = list_length(parse->groupingSets);
+ }
+ else if (parse->hasAggs || root->hasHavingQual)
+ {
+ /* Plain aggregation, one result row */
+ dNumGroups = 1;
+ }
+ else
+ {
+ /* Not grouping */
+ dNumGroups = 1;
+ }
+
+ return dNumGroups;
+}
+
+/*
+ * create_grouping_paths
+ *
+ * Build a new upperrel containing Paths for grouping and/or aggregation.
+ * Along the way, we also build an upperrel for Paths which are partially
+ * grouped and/or aggregated. A partially grouped and/or aggregated path
+ * needs a FinalizeAggregate node to complete the aggregation. Currently,
+ * the only partially grouped paths we build are also partial paths; that
+ * is, they need a Gather and then a FinalizeAggregate.
+ *
+ * input_rel: contains the source-data Paths
+ * target: the pathtarget for the result Paths to compute
+ * gd: grouping sets data including list of grouping sets and their clauses
+ *
+ * Note: all Paths in input_rel are expected to return the target computed
+ * by make_group_input_target.
+ */
+static RelOptInfo *
+create_grouping_paths(PlannerInfo *root,
+ RelOptInfo *input_rel,
+ PathTarget *target,
+ bool target_parallel_safe,
+ grouping_sets_data *gd)
+{
+ Query *parse = root->parse;
+ RelOptInfo *grouped_rel;
+ RelOptInfo *partially_grouped_rel;
+ AggClauseCosts agg_costs;
+
+ MemSet(&agg_costs, 0, sizeof(AggClauseCosts));
+ get_agg_clause_costs(root, AGGSPLIT_SIMPLE, &agg_costs);
+
+ /*
+ * Create grouping relation to hold fully aggregated grouping and/or
+ * aggregation paths.
+ */
+ grouped_rel = make_grouping_rel(root, input_rel, target,
+ target_parallel_safe, parse->havingQual);
+
+ /*
+ * Create either paths for a degenerate grouping or paths for ordinary
+ * grouping, as appropriate.
+ */
+ if (is_degenerate_grouping(root))
+ create_degenerate_grouping_paths(root, input_rel, grouped_rel);
+ else
+ {
+ int flags = 0;
+ GroupPathExtraData extra;
+
+ /*
+ * Determine whether it's possible to perform sort-based
+ * implementations of grouping. (Note that if groupClause is empty,
+ * grouping_is_sortable() is trivially true, and all the
+ * pathkeys_contained_in() tests will succeed too, so that we'll
+ * consider every surviving input path.)
+ *
+ * If we have grouping sets, we might be able to sort some but not all
+ * of them; in this case, we need can_sort to be true as long as we
+ * must consider any sorted-input plan.
+ */
+ if ((gd && gd->rollups != NIL)
+ || grouping_is_sortable(parse->groupClause))
+ flags |= GROUPING_CAN_USE_SORT;
+
+ /*
+ * Determine whether we should consider hash-based implementations of
+ * grouping.
+ *
+ * Hashed aggregation only applies if we're grouping. If we have
+ * grouping sets, some groups might be hashable but others not; in
+ * this case we set can_hash true as long as there is nothing globally
+ * preventing us from hashing (and we should therefore consider plans
+ * with hashes).
+ *
+ * Executor doesn't support hashed aggregation with DISTINCT or ORDER
+ * BY aggregates. (Doing so would imply storing *all* the input
+ * values in the hash table, and/or running many sorts in parallel,
+ * either of which seems like a certain loser.) We similarly don't
+ * support ordered-set aggregates in hashed aggregation, but that case
+ * is also included in the numOrderedAggs count.
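+ *
+ * For example, a query computing count(DISTINCT x) or
+ * string_agg(x, ',' ORDER BY y) has numOrderedAggs > 0, so hashed
+ * grouping is not considered here.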
+ *
+ * Note: grouping_is_hashable() is much more expensive to check than
+ * the other gating conditions, so we want to do it last.
+ */
+ if ((parse->groupClause != NIL &&
+ root->numOrderedAggs == 0 &&
+ (gd ? gd->any_hashable : grouping_is_hashable(parse->groupClause))))
+ flags |= GROUPING_CAN_USE_HASH;
+
+ /*
+ * Determine whether partial aggregation is possible.
+ */
+ if (can_partial_agg(root))
+ flags |= GROUPING_CAN_PARTIAL_AGG;
+
+ extra.flags = flags;
+ extra.target_parallel_safe = target_parallel_safe;
+ extra.havingQual = parse->havingQual;
+ extra.targetList = parse->targetList;
+ extra.partial_costs_set = false;
+
+ /*
+ * Determine whether partitionwise aggregation is in theory possible.
+ * It can be disabled by the user, and for now, we don't try to
+ * support grouping sets. create_ordinary_grouping_paths() will check
+ * additional conditions, such as whether input_rel is partitioned.
+ */
+ if (enable_partitionwise_aggregate && !parse->groupingSets)
+ extra.patype = PARTITIONWISE_AGGREGATE_FULL;
+ else
+ extra.patype = PARTITIONWISE_AGGREGATE_NONE;
+
+ create_ordinary_grouping_paths(root, input_rel, grouped_rel,
+ &agg_costs, gd, &extra,
+ &partially_grouped_rel);
+ }
+
+ set_cheapest(grouped_rel);
+ return grouped_rel;
+}
+
+/*
+ * make_grouping_rel
+ *
+ * Create a new grouping rel and set basic properties.
+ *
+ * input_rel represents the underlying scan/join relation.
+ * target is the output expected from the grouping relation.
+ */
+static RelOptInfo *
+make_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
+ PathTarget *target, bool target_parallel_safe,
+ Node *havingQual)
+{
+ RelOptInfo *grouped_rel;
+
+ if (IS_OTHER_REL(input_rel))
+ {
+ grouped_rel = fetch_upper_rel(root, UPPERREL_GROUP_AGG,
+ input_rel->relids);
+ grouped_rel->reloptkind = RELOPT_OTHER_UPPER_REL;
+ }
+ else
+ {
+ /*
+ * By tradition, the relids set for the main grouping relation is
+ * NULL. (This could be changed, but might require adjustments
+ * elsewhere.)
+ */
+ grouped_rel = fetch_upper_rel(root, UPPERREL_GROUP_AGG, NULL);
+ }
+
+ /* Set target. */
+ grouped_rel->reltarget = target;
+
+ /*
+ * If the input relation is not parallel-safe, then the grouped relation
+ * can't be parallel-safe, either. Otherwise, it's parallel-safe if the
+ * target list and HAVING quals are parallel-safe.
+ */
+ if (input_rel->consider_parallel && target_parallel_safe &&
+ is_parallel_safe(root, (Node *) havingQual))
+ grouped_rel->consider_parallel = true;
+
+ /*
+ * If the input rel belongs to a single FDW, so does the grouped rel.
+ */
+ grouped_rel->serverid = input_rel->serverid;
+ grouped_rel->userid = input_rel->userid;
+ grouped_rel->useridiscurrent = input_rel->useridiscurrent;
+ grouped_rel->fdwroutine = input_rel->fdwroutine;
+
+ return grouped_rel;
+}
+
+/*
+ * is_degenerate_grouping
+ *
+ * A degenerate grouping is one in which the query has a HAVING qual and/or
+ * grouping sets, but no aggregates and no GROUP BY (which implies that the
+ * grouping sets are all empty).
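+ *
+ * For example, "SELECT 1 FROM tab HAVING 2 > 1" is degenerate: it has a
+ * HAVING qual but no aggregates and no GROUP BY.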
+ */
+static bool
+is_degenerate_grouping(PlannerInfo *root)
+{
+ Query *parse = root->parse;
+
+ return (root->hasHavingQual || parse->groupingSets) &&
+ !parse->hasAggs && parse->groupClause == NIL;
+}
+
+/*
+ * create_degenerate_grouping_paths
+ *
+ * When the grouping is degenerate (see is_degenerate_grouping), we are
+ * supposed to emit either zero or one row for each grouping set depending on
+ * whether HAVING succeeds. Furthermore, there cannot be any variables in
+ * either HAVING or the targetlist, so we actually do not need the FROM table
+ * at all! We can just throw away the plan-so-far and generate a Result node.
+ * This is a sufficiently unusual corner case that it's not worth contorting
+ * the structure of this module to avoid having to generate the earlier paths
+ * in the first place.
+ */
+static void
+create_degenerate_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel,
+ RelOptInfo *grouped_rel)
+{
+ Query *parse = root->parse;
+ int nrows;
+ Path *path;
+
+ nrows = list_length(parse->groupingSets);
+ if (nrows > 1)
+ {
+ /*
+ * Doesn't seem worthwhile writing code to cons up a generate_series
+ * or a values scan to emit multiple rows. Instead just make N clones
+ * and append them. (With a volatile HAVING clause, this means you
+ * might get between 0 and N output rows. Offhand I think that's
+ * desired.)
+ */
+ List *paths = NIL;
+
+ while (--nrows >= 0)
+ {
+ path = (Path *)
+ create_group_result_path(root, grouped_rel,
+ grouped_rel->reltarget,
+ (List *) parse->havingQual);
+ paths = lappend(paths, path);
+ }
+ path = (Path *)
+ create_append_path(root,
+ grouped_rel,
+ paths,
+ NIL,
+ NIL,
+ NULL,
+ 0,
+ false,
+ -1);
+ }
+ else
+ {
+ /* No grouping sets, or just one, so one output row */
+ path = (Path *)
+ create_group_result_path(root, grouped_rel,
+ grouped_rel->reltarget,
+ (List *) parse->havingQual);
+ }
+
+ add_path(grouped_rel, path);
+}
+
+/*
+ * create_ordinary_grouping_paths
+ *
+ * Create grouping paths for the ordinary (that is, non-degenerate) case.
+ *
+ * We need to consider sorted and hashed aggregation in the same function,
+ * because otherwise (1) it would be harder to throw an appropriate error
+ * message if neither way works, and (2) we should not allow hashtable size
+ * considerations to dissuade us from using hashing if sorting is not possible.
+ *
+ * *partially_grouped_rel_p will be set to the partially grouped rel which this
+ * function creates, or to NULL if it doesn't create one.
+ */
+static void
+create_ordinary_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel,
+ RelOptInfo *grouped_rel,
+ const AggClauseCosts *agg_costs,
+ grouping_sets_data *gd,
+ GroupPathExtraData *extra,
+ RelOptInfo **partially_grouped_rel_p)
+{
+ Path *cheapest_path = input_rel->cheapest_total_path;
+ RelOptInfo *partially_grouped_rel = NULL;
+ double dNumGroups;
+ PartitionwiseAggregateType patype = PARTITIONWISE_AGGREGATE_NONE;
+
+ /*
+ * If this is the topmost grouping relation or if the parent relation is
+ * doing some form of partitionwise aggregation, then we may be able to do
+ * it at this level also. However, if the input relation is not
+ * partitioned, partitionwise aggregate is impossible.
+ */
+ if (extra->patype != PARTITIONWISE_AGGREGATE_NONE &&
+ IS_PARTITIONED_REL(input_rel))
+ {
+ /*
+ * If this is the topmost relation or if the parent relation is doing
+ * full partitionwise aggregation, then we can do full partitionwise
+ * aggregation provided that the GROUP BY clause contains all of the
+ * partitioning columns at this level. Otherwise, we can do at most
+ * partial partitionwise aggregation. But if partial aggregation is
+ * not supported in general then we can't use it for partitionwise
+ * aggregation either.
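+ *
+ * For example, if the input rel is partitioned on (a), then GROUP BY a, b
+ * permits full partitionwise aggregation, while GROUP BY b alone permits
+ * at most the partial form.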
+ */
+ if (extra->patype == PARTITIONWISE_AGGREGATE_FULL &&
+ group_by_has_partkey(input_rel, extra->targetList,
+ root->parse->groupClause))
+ patype = PARTITIONWISE_AGGREGATE_FULL;
+ else if ((extra->flags & GROUPING_CAN_PARTIAL_AGG) != 0)
+ patype = PARTITIONWISE_AGGREGATE_PARTIAL;
+ else
+ patype = PARTITIONWISE_AGGREGATE_NONE;
+ }
+
+ /*
+ * Before generating paths for grouped_rel, we first generate any possible
+ * partially grouped paths; that way, later code can easily consider both
+ * parallel and non-parallel approaches to grouping.
+ */
+ if ((extra->flags & GROUPING_CAN_PARTIAL_AGG) != 0)
+ {
+ bool force_rel_creation;
+
+ /*
+ * If we're doing partitionwise aggregation at this level, force
+ * creation of a partially_grouped_rel so we can add partitionwise
+ * paths to it.
+ */
+ force_rel_creation = (patype == PARTITIONWISE_AGGREGATE_PARTIAL);
+
+ partially_grouped_rel =
+ create_partial_grouping_paths(root,
+ grouped_rel,
+ input_rel,
+ gd,
+ extra,
+ force_rel_creation);
+ }
+
+ /* Set out parameter. */
+ *partially_grouped_rel_p = partially_grouped_rel;
+
+ /* Apply partitionwise aggregation technique, if possible. */
+ if (patype != PARTITIONWISE_AGGREGATE_NONE)
+ create_partitionwise_grouping_paths(root, input_rel, grouped_rel,
+ partially_grouped_rel, agg_costs,
+ gd, patype, extra);
+
+ /* If we are doing partial aggregation only, return. */
+ if (extra->patype == PARTITIONWISE_AGGREGATE_PARTIAL)
+ {
+ Assert(partially_grouped_rel);
+
+ if (partially_grouped_rel->pathlist)
+ set_cheapest(partially_grouped_rel);
+
+ return;
+ }
+
+ /* Gather any partially grouped partial paths. */
+ if (partially_grouped_rel && partially_grouped_rel->partial_pathlist)
+ {
+ gather_grouping_paths(root, partially_grouped_rel);
+ set_cheapest(partially_grouped_rel);
+ }
+
+ /*
+ * Estimate number of groups.
+ */
+ dNumGroups = get_number_of_groups(root,
+ cheapest_path->rows,
+ gd,
+ extra->targetList);
+
+ /* Build final grouping paths */
+ add_paths_to_grouping_rel(root, input_rel, grouped_rel,
+ partially_grouped_rel, agg_costs, gd,
+ dNumGroups, extra);
+
+ /* Give a helpful error if we failed to find any implementation */
+ if (grouped_rel->pathlist == NIL)
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("could not implement GROUP BY"),
+ errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
+
+ /*
+ * If there is an FDW that's responsible for all baserels of the query,
+ * let it consider adding ForeignPaths.
+ */
+ if (grouped_rel->fdwroutine &&
+ grouped_rel->fdwroutine->GetForeignUpperPaths)
+ grouped_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_GROUP_AGG,
+ input_rel, grouped_rel,
+ extra);
+
+ /* Let extensions possibly add some more paths */
+ if (create_upper_paths_hook)
+ (*create_upper_paths_hook) (root, UPPERREL_GROUP_AGG,
+ input_rel, grouped_rel,
+ extra);
+}
+
+/*
+ * For a given input path, consider the possible ways of doing grouping sets on
+ * it, by combinations of hashing and sorting. This can be called multiple
+ * times, so it's important that it not scribble on input. No result is
+ * returned, but any generated paths are added to grouped_rel.
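+ *
+ * For example, GROUP BY GROUPING SETS ((a, b), (a), (c)) might be handled
+ * by a sorted rollup over (a, b) -> (a) combined with a hashed aggregation
+ * of (c), or entirely by hashing, depending on what fits within hash_mem.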
+ */
+static void
+consider_groupingsets_paths(PlannerInfo *root,
+ RelOptInfo *grouped_rel,
+ Path *path,
+ bool is_sorted,
+ bool can_hash,
+ grouping_sets_data *gd,
+ const AggClauseCosts *agg_costs,
+ double dNumGroups)
+{
+ Query *parse = root->parse;
+ Size hash_mem_limit = get_hash_memory_limit();
+
+ /*
+ * If we're not being offered sorted input, then only consider plans that
+ * can be done entirely by hashing.
+ *
+ * We can hash everything if it looks like it'll fit in hash_mem. But if
+ * the input is actually sorted despite not being advertised as such, we
+ * prefer to make use of that in order to use less memory.
+ *
+ * If none of the grouping sets are sortable, then ignore the hash_mem
+ * limit and generate a path anyway, since otherwise we'll just fail.
+ */
+ if (!is_sorted)
+ {
+ List *new_rollups = NIL;
+ RollupData *unhashed_rollup = NULL;
+ List *sets_data;
+ List *empty_sets_data = NIL;
+ List *empty_sets = NIL;
+ ListCell *lc;
+ ListCell *l_start = list_head(gd->rollups);
+ AggStrategy strat = AGG_HASHED;
+ double hashsize;
+ double exclude_groups = 0.0;
+
+ Assert(can_hash);
+
+ /*
+ * If the input is coincidentally sorted usefully (which can happen
+ * even if is_sorted is false, since that only means that our caller
+ * has set up the sorting for us), then save some hashtable space by
+ * making use of that. But we need to watch out for degenerate cases:
+ *
+ * 1) If there are any empty grouping sets, then group_pathkeys might
+ * be NIL if all non-empty grouping sets are unsortable. In this case,
+ * there will be a rollup containing only empty groups, and the
+ * pathkeys_contained_in test is vacuously true; this is ok.
+ *
+ * XXX: the above relies on the fact that group_pathkeys is generated
+ * from the first rollup. If we add the ability to consider multiple
+ * sort orders for grouping input, this assumption might fail.
+ *
+ * 2) If there are no empty sets and only unsortable sets, then the
+ * rollups list will be empty (and thus l_start == NULL), and
+ * group_pathkeys will be NIL; we must ensure that the vacuously-true
+ * pathkeys_contained_in test doesn't cause us to crash.
+ */
+ if (l_start != NULL &&
+ pathkeys_contained_in(root->group_pathkeys, path->pathkeys))
+ {
+ unhashed_rollup = lfirst_node(RollupData, l_start);
+ exclude_groups = unhashed_rollup->numGroups;
+ l_start = lnext(gd->rollups, l_start);
+ }
+
+ hashsize = estimate_hashagg_tablesize(root,
+ path,
+ agg_costs,
+ dNumGroups - exclude_groups);
+
+ /*
+ * gd->rollups is empty if we have only unsortable columns to work
+ * with. Override hash_mem in that case; otherwise, we'll rely on the
+ * sorted-input case to generate usable mixed paths.
+ */
+ if (hashsize > hash_mem_limit && gd->rollups)
+ return; /* nope, won't fit */
+
+ /*
+ * We need to burst the existing rollups list into individual grouping
+ * sets and recompute a groupClause for each set.
+ */
+ sets_data = list_copy(gd->unsortable_sets);
+
+ for_each_cell(lc, gd->rollups, l_start)
+ {
+ RollupData *rollup = lfirst_node(RollupData, lc);
+
+ /*
+ * If we find an unhashable rollup that's not been skipped by the
+ * "actually sorted" check above, we can't cope; we'd need sorted
+ * input (with a different sort order) but we can't get that here.
+ * So bail out; we'll get a valid path from the is_sorted case
+ * instead.
+ *
+ * The mere presence of empty grouping sets doesn't make a rollup
+ * unhashable (see preprocess_grouping_sets); we handle those
+ * specially below.
+ */
+ if (!rollup->hashable)
+ return;
+
+ sets_data = list_concat(sets_data, rollup->gsets_data);
+ }
+ foreach(lc, sets_data)
+ {
+ GroupingSetData *gs = lfirst_node(GroupingSetData, lc);
+ List *gset = gs->set;
+ RollupData *rollup;
+
+ if (gset == NIL)
+ {
+ /* Empty grouping sets can't be hashed. */
+ empty_sets_data = lappend(empty_sets_data, gs);
+ empty_sets = lappend(empty_sets, NIL);
+ }
+ else
+ {
+ rollup = makeNode(RollupData);
+
+ rollup->groupClause = preprocess_groupclause(root, gset);
+ rollup->gsets_data = list_make1(gs);
+ rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
+ rollup->gsets_data,
+ gd->tleref_to_colnum_map);
+ rollup->numGroups = gs->numGroups;
+ rollup->hashable = true;
+ rollup->is_hashed = true;
+ new_rollups = lappend(new_rollups, rollup);
+ }
+ }
+
+ /*
+ * If we didn't find anything nonempty to hash, then bail. We'll
+ * generate a path from the is_sorted case.
+ */
+ if (new_rollups == NIL)
+ return;
+
+ /*
+ * If there were empty grouping sets they should have been in the
+ * first rollup.
+ */
+ Assert(!unhashed_rollup || !empty_sets);
+
+ if (unhashed_rollup)
+ {
+ new_rollups = lappend(new_rollups, unhashed_rollup);
+ strat = AGG_MIXED;
+ }
+ else if (empty_sets)
+ {
+ RollupData *rollup = makeNode(RollupData);
+
+ rollup->groupClause = NIL;
+ rollup->gsets_data = empty_sets_data;
+ rollup->gsets = empty_sets;
+ rollup->numGroups = list_length(empty_sets);
+ rollup->hashable = false;
+ rollup->is_hashed = false;
+ new_rollups = lappend(new_rollups, rollup);
+ strat = AGG_MIXED;
+ }
+
+ add_path(grouped_rel, (Path *)
+ create_groupingsets_path(root,
+ grouped_rel,
+ path,
+ (List *) parse->havingQual,
+ strat,
+ new_rollups,
+ agg_costs,
+ dNumGroups));
+ return;
+ }
+
+ /*
+ * If we have sorted input but nothing we can do with it, bail.
+ */
+ if (list_length(gd->rollups) == 0)
+ return;
+
+ /*
+ * Given sorted input, we try to make two paths: one sorted and one mixed
+ * sort/hash. (We need to try both because hashagg might be disabled, or
+ * some columns might not be sortable.)
+ *
+ * can_hash is passed in as false if some obstacle elsewhere (such as
+ * ordered aggs) means that we shouldn't consider hashing at all.
+ */
+ if (can_hash && gd->any_hashable)
+ {
+ List *rollups = NIL;
+ List *hash_sets = list_copy(gd->unsortable_sets);
+ double availspace = hash_mem_limit;
+ ListCell *lc;
+
+ /*
+ * Account first for space needed for groups we can't sort at all.
+ */
+ availspace -= estimate_hashagg_tablesize(root,
+ path,
+ agg_costs,
+ gd->dNumHashGroups);
+
+ if (availspace > 0 && list_length(gd->rollups) > 1)
+ {
+ double scale;
+ int num_rollups = list_length(gd->rollups);
+ int k_capacity;
+ int *k_weights = palloc(num_rollups * sizeof(int));
+ Bitmapset *hash_items = NULL;
+ int i;
+
+ /*
+ * We treat this as a knapsack problem: the knapsack capacity
+ * represents hash_mem, the item weights are the estimated memory
+ * usage of the hashtables needed to implement a single rollup,
+ * and we really ought to use the cost saving as the item value;
+ * however, currently the costs assigned to sort nodes don't
+ * reflect the comparison costs well, and so we treat all items as
+ * of equal value (each rollup we hash instead saves us one sort).
+ *
+ * To use the discrete knapsack, we need to scale the values to a
+ * reasonably small bounded range. We choose to allow a 5% error
+ * margin; we have no more than 4096 rollups in the worst possible
+ * case, which with a 5% error margin will require a bit over 42MB
+ * of workspace. (Anyone wanting to plan queries that complex had
+ * better have the memory for it. In more reasonable cases, with
+ * no more than a couple of dozen rollups, the memory usage will
+ * be negligible.)
+ *
+ * k_capacity is naturally bounded, but we clamp the values for
+ * scale and weight (below) to avoid overflows or underflows (or
+ * uselessly trying to use a scale factor less than 1 byte).
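+ *
+ * As an illustration: with 4MB of available space and 3 candidate
+ * rollups, scale = Max(4194304 / 60.0, 1.0) ~= 69905 bytes and
+ * k_capacity = 60, so each rollup's hashtable estimate is expressed
+ * in units of roughly 68kB.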
+ */
+ scale = Max(availspace / (20.0 * num_rollups), 1.0);
+ k_capacity = (int) floor(availspace / scale);
+
+ /*
+ * We leave the first rollup out of consideration since it's the
+ * one that matches the input sort order. We assign indexes "i"
+ * to only those entries considered for hashing; the second loop,
+ * below, must use the same condition.
+ */
+ i = 0;
+ for_each_from(lc, gd->rollups, 1)
+ {
+ RollupData *rollup = lfirst_node(RollupData, lc);
+
+ if (rollup->hashable)
+ {
+ double sz = estimate_hashagg_tablesize(root,
+ path,
+ agg_costs,
+ rollup->numGroups);
+
+ /*
+ * If sz is enormous, but hash_mem (and hence scale) is
+ * small, avoid integer overflow here.
+ */
+ k_weights[i] = (int) Min(floor(sz / scale),
+ k_capacity + 1.0);
+ ++i;
+ }
+ }
+
+ /*
+ * Apply knapsack algorithm; compute the set of items which
+ * maximizes the value stored (in this case the number of sorts
+ * saved) while keeping the total size (approximately) within
+ * capacity.
+ */
+ if (i > 0)
+ hash_items = DiscreteKnapsack(k_capacity, i, k_weights, NULL);
+
+ if (!bms_is_empty(hash_items))
+ {
+ rollups = list_make1(linitial(gd->rollups));
+
+ i = 0;
+ for_each_from(lc, gd->rollups, 1)
+ {
+ RollupData *rollup = lfirst_node(RollupData, lc);
+
+ if (rollup->hashable)
+ {
+ if (bms_is_member(i, hash_items))
+ hash_sets = list_concat(hash_sets,
+ rollup->gsets_data);
+ else
+ rollups = lappend(rollups, rollup);
+ ++i;
+ }
+ else
+ rollups = lappend(rollups, rollup);
+ }
+ }
+ }
+
+ if (!rollups && hash_sets)
+ rollups = list_copy(gd->rollups);
+
+ foreach(lc, hash_sets)
+ {
+ GroupingSetData *gs = lfirst_node(GroupingSetData, lc);
+ RollupData *rollup = makeNode(RollupData);
+
+ Assert(gs->set != NIL);
+
+ rollup->groupClause = preprocess_groupclause(root, gs->set);
+ rollup->gsets_data = list_make1(gs);
+ rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
+ rollup->gsets_data,
+ gd->tleref_to_colnum_map);
+ rollup->numGroups = gs->numGroups;
+ rollup->hashable = true;
+ rollup->is_hashed = true;
+ rollups = lcons(rollup, rollups);
+ }
+
+ if (rollups)
+ {
+ add_path(grouped_rel, (Path *)
+ create_groupingsets_path(root,
+ grouped_rel,
+ path,
+ (List *) parse->havingQual,
+ AGG_MIXED,
+ rollups,
+ agg_costs,
+ dNumGroups));
+ }
+ }
+
+ /*
+ * Now try the simple sorted case.
+ */
+ if (!gd->unsortable_sets)
+ add_path(grouped_rel, (Path *)
+ create_groupingsets_path(root,
+ grouped_rel,
+ path,
+ (List *) parse->havingQual,
+ AGG_SORTED,
+ gd->rollups,
+ agg_costs,
+ dNumGroups));
+}
+
+/*
+ * create_window_paths
+ *
+ * Build a new upperrel containing Paths for window-function evaluation.
+ *
+ * input_rel: contains the source-data Paths
+ * input_target: result of make_window_input_target
+ * output_target: what the topmost WindowAggPath should return
+ * wflists: result of find_window_functions
+ * activeWindows: result of select_active_windows
+ *
+ * Note: all Paths in input_rel are expected to return input_target.
+ */
+static RelOptInfo *
+create_window_paths(PlannerInfo *root,
+ RelOptInfo *input_rel,
+ PathTarget *input_target,
+ PathTarget *output_target,
+ bool output_target_parallel_safe,
+ WindowFuncLists *wflists,
+ List *activeWindows)
+{
+ RelOptInfo *window_rel;
+ ListCell *lc;
+
+ /* For now, do all work in the (WINDOW, NULL) upperrel */
+ window_rel = fetch_upper_rel(root, UPPERREL_WINDOW, NULL);
+
+ /*
+ * If the input relation is not parallel-safe, then the window relation
+ * can't be parallel-safe, either. Otherwise, we need to examine the
+ * target list and active windows for non-parallel-safe constructs.
+ */
+ if (input_rel->consider_parallel && output_target_parallel_safe &&
+ is_parallel_safe(root, (Node *) activeWindows))
+ window_rel->consider_parallel = true;
+
+ /*
+ * If the input rel belongs to a single FDW, so does the window rel.
+ */
+ window_rel->serverid = input_rel->serverid;
+ window_rel->userid = input_rel->userid;
+ window_rel->useridiscurrent = input_rel->useridiscurrent;
+ window_rel->fdwroutine = input_rel->fdwroutine;
+
+ /*
+ * Consider computing window functions starting from the existing
+ * cheapest-total path (which will likely require a sort) as well as any
+ * existing paths that satisfy or partially satisfy root->window_pathkeys.
+ */
+ foreach(lc, input_rel->pathlist)
+ {
+ Path *path = (Path *) lfirst(lc);
+ int presorted_keys;
+
+ if (path == input_rel->cheapest_total_path ||
+ pathkeys_count_contained_in(root->window_pathkeys, path->pathkeys,
+ &presorted_keys) ||
+ presorted_keys > 0)
+ create_one_window_path(root,
+ window_rel,
+ path,
+ input_target,
+ output_target,
+ wflists,
+ activeWindows);
+ }
+
+ /*
+ * If there is an FDW that's responsible for all baserels of the query,
+ * let it consider adding ForeignPaths.
+ */
+ if (window_rel->fdwroutine &&
+ window_rel->fdwroutine->GetForeignUpperPaths)
+ window_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_WINDOW,
+ input_rel, window_rel,
+ NULL);
+
+ /* Let extensions possibly add some more paths */
+ if (create_upper_paths_hook)
+ (*create_upper_paths_hook) (root, UPPERREL_WINDOW,
+ input_rel, window_rel, NULL);
+
+ /* Now choose the best path(s) */
+ set_cheapest(window_rel);
+
+ return window_rel;
+}
+
+/*
+ * Stack window-function implementation steps atop the given Path, and
+ * add the result to window_rel.
+ *
+ * window_rel: upperrel to contain result
+ * path: input Path to use (must return input_target)
+ * input_target: result of make_window_input_target
+ * output_target: what the topmost WindowAggPath should return
+ * wflists: result of find_window_functions
+ * activeWindows: result of select_active_windows
+ */
+static void
+create_one_window_path(PlannerInfo *root,
+ RelOptInfo *window_rel,
+ Path *path,
+ PathTarget *input_target,
+ PathTarget *output_target,
+ WindowFuncLists *wflists,
+ List *activeWindows)
+{
+ PathTarget *window_target;
+ ListCell *l;
+ List *topqual = NIL;
+
+ /*
+ * Since each window clause could require a different sort order, we stack
+ * up a WindowAgg node for each clause, with sort steps between them as
+ * needed. (We assume that select_active_windows chose a good order for
+ * executing the clauses in.)
+ *
+ * input_target should contain all Vars and Aggs needed for the result.
+ * (In some cases we wouldn't need to propagate all of these all the way
+ * to the top, since they might only be needed as inputs to WindowFuncs.
+ * It's probably not worth trying to optimize that though.) It must also
+ * contain all window partitioning and sorting expressions, to ensure
+ * they're computed only once at the bottom of the stack (that's critical
+ * for volatile functions). As we climb up the stack, we'll add outputs
+ * for the WindowFuncs computed at each level.
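+ *
+ * For example, if one active window uses PARTITION BY a ORDER BY b and
+ * the next uses PARTITION BY c, a Sort on c is typically inserted
+ * between the two WindowAgg levels.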
+ */
+ window_target = input_target;
+
+ foreach(l, activeWindows)
+ {
+ WindowClause *wc = lfirst_node(WindowClause, l);
+ List *window_pathkeys;
+ int presorted_keys;
+ bool is_sorted;
+ bool topwindow;
+
+ window_pathkeys = make_pathkeys_for_window(root,
+ wc,
+ root->processed_tlist);
+
+ is_sorted = pathkeys_count_contained_in(window_pathkeys,
+ path->pathkeys,
+ &presorted_keys);
+
+ /* Sort if necessary */
+ if (!is_sorted)
+ {
+ /*
+ * No presorted keys or incremental sort disabled, just perform a
+ * complete sort.
+ */
+ if (presorted_keys == 0 || !enable_incremental_sort)
+ path = (Path *) create_sort_path(root, window_rel,
+ path,
+ window_pathkeys,
+ -1.0);
+ else
+ {
+ /*
+ * Since we have presorted keys and incremental sort is
+ * enabled, just use incremental sort.
+ */
+ path = (Path *) create_incremental_sort_path(root,
+ window_rel,
+ path,
+ window_pathkeys,
+ presorted_keys,
+ -1.0);
+ }
+ }
+
+ if (lnext(activeWindows, l))
+ {
+ /*
+ * Add the current WindowFuncs to the output target for this
+ * intermediate WindowAggPath. We must copy window_target to
+ * avoid changing the previous path's target.
+ *
+ * Note: a WindowFunc adds nothing to the target's eval costs; but
+ * we do need to account for the increase in tlist width.
+ */
+ ListCell *lc2;
+
+ window_target = copy_pathtarget(window_target);
+ foreach(lc2, wflists->windowFuncs[wc->winref])
+ {
+ WindowFunc *wfunc = lfirst_node(WindowFunc, lc2);
+
+ add_column_to_pathtarget(window_target, (Expr *) wfunc, 0);
+ window_target->width += get_typavgwidth(wfunc->wintype, -1);
+ }
+ }
+ else
+ {
+ /* Install the goal target in the topmost WindowAgg */
+ window_target = output_target;
+ }
+
+ /* mark the final item in the list as the top-level window */
+ topwindow = foreach_current_index(l) == list_length(activeWindows) - 1;
+
+ /*
+ * Accumulate all of the runConditions from each intermediate
+ * WindowClause. The top-level WindowAgg must pass these as a qual so
+ * that it filters out unwanted tuples correctly.
+ */
+ if (!topwindow)
+ topqual = list_concat(topqual, wc->runCondition);
+
+ path = (Path *)
+ create_windowagg_path(root, window_rel, path, window_target,
+ wflists->windowFuncs[wc->winref],
+ wc, topwindow ? topqual : NIL, topwindow);
+ }
+
+ add_path(window_rel, path);
+}
+
+/*
+ * create_distinct_paths
+ *
+ * Build a new upperrel containing Paths for SELECT DISTINCT evaluation.
+ *
+ * input_rel: contains the source-data Paths
+ *
+ * Note: input paths should already compute the desired pathtarget, since
+ * Sort/Unique won't project anything.
+ */
+static RelOptInfo *
+create_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel)
+{
+ RelOptInfo *distinct_rel;
+
+ /* For now, do all work in the (DISTINCT, NULL) upperrel */
+ distinct_rel = fetch_upper_rel(root, UPPERREL_DISTINCT, NULL);
+
+ /*
+ * We don't compute anything at this level, so distinct_rel will be
+ * parallel-safe if the input rel is parallel-safe. In particular, if
+ * there is a DISTINCT ON (...) clause, any path for the input_rel will
+ * output those expressions, and will not be parallel-safe unless those
+ * expressions are parallel-safe.
+ */
+ distinct_rel->consider_parallel = input_rel->consider_parallel;
+
+ /*
+ * If the input rel belongs to a single FDW, so does the distinct_rel.
+ */
+ distinct_rel->serverid = input_rel->serverid;
+ distinct_rel->userid = input_rel->userid;
+ distinct_rel->useridiscurrent = input_rel->useridiscurrent;
+ distinct_rel->fdwroutine = input_rel->fdwroutine;
+
+ /* build distinct paths based on input_rel's pathlist */
+ create_final_distinct_paths(root, input_rel, distinct_rel);
+
+ /* now build distinct paths based on input_rel's partial_pathlist */
+ create_partial_distinct_paths(root, input_rel, distinct_rel);
+
+ /* Give a helpful error if we failed to create any paths */
+ if (distinct_rel->pathlist == NIL)
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("could not implement DISTINCT"),
+ errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
+
+ /*
+ * If there is an FDW that's responsible for all baserels of the query,
+ * let it consider adding ForeignPaths.
+ */
+ if (distinct_rel->fdwroutine &&
+ distinct_rel->fdwroutine->GetForeignUpperPaths)
+ distinct_rel->fdwroutine->GetForeignUpperPaths(root,
+ UPPERREL_DISTINCT,
+ input_rel,
+ distinct_rel,
+ NULL);
+
+ /* Let extensions possibly add some more paths */
+ if (create_upper_paths_hook)
+ (*create_upper_paths_hook) (root, UPPERREL_DISTINCT, input_rel,
+ distinct_rel, NULL);
+
+ /* Now choose the best path(s) */
+ set_cheapest(distinct_rel);
+
+ return distinct_rel;
+}
+
+/*
+ * create_partial_distinct_paths
+ *
+ * Process 'input_rel' partial paths and add unique/aggregate paths to the
+ * UPPERREL_PARTIAL_DISTINCT rel. For paths created, add Gather/GatherMerge
+ * paths on top and add a final unique/aggregate path to remove any duplicate
+ * produced from combining rows from parallel workers.
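+ *
+ * The resulting plans therefore look roughly like:
+ *    Unique (or HashAggregate)
+ *      -> Gather Merge (or Gather)
+ *           -> Unique (or HashAggregate) atop a partial path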
+ */
+static void
+create_partial_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel,
+ RelOptInfo *final_distinct_rel)
+{
+ RelOptInfo *partial_distinct_rel;
+ Query *parse;
+ List *distinctExprs;
+ double numDistinctRows;
+ Path *cheapest_partial_path;
+ ListCell *lc;
+
+ /* nothing to do when there are no partial paths in the input rel */
+ if (!input_rel->consider_parallel || input_rel->partial_pathlist == NIL)
+ return;
+
+ parse = root->parse;
+
+ /* can't do parallel DISTINCT ON */
+ if (parse->hasDistinctOn)
+ return;
+
+ partial_distinct_rel = fetch_upper_rel(root, UPPERREL_PARTIAL_DISTINCT,
+ NULL);
+ partial_distinct_rel->reltarget = root->upper_targets[UPPERREL_PARTIAL_DISTINCT];
+ partial_distinct_rel->consider_parallel = input_rel->consider_parallel;
+
+ /*
+ * If input_rel belongs to a single FDW, so does the partial_distinct_rel.
+ */
+ partial_distinct_rel->serverid = input_rel->serverid;
+ partial_distinct_rel->userid = input_rel->userid;
+ partial_distinct_rel->useridiscurrent = input_rel->useridiscurrent;
+ partial_distinct_rel->fdwroutine = input_rel->fdwroutine;
+
+ cheapest_partial_path = linitial(input_rel->partial_pathlist);
+
+ distinctExprs = get_sortgrouplist_exprs(parse->distinctClause,
+ parse->targetList);
+
+ /* estimate how many distinct rows we'll get from each worker */
+ numDistinctRows = estimate_num_groups(root, distinctExprs,
+ cheapest_partial_path->rows,
+ NULL, NULL);
+
+ /* first try adding unique paths atop of sorted paths */
+ if (grouping_is_sortable(parse->distinctClause))
+ {
+ foreach(lc, input_rel->partial_pathlist)
+ {
+ Path *path = (Path *) lfirst(lc);
+
+ if (pathkeys_contained_in(root->distinct_pathkeys, path->pathkeys))
+ {
+ add_partial_path(partial_distinct_rel, (Path *)
+ create_upper_unique_path(root,
+ partial_distinct_rel,
+ path,
+ list_length(root->distinct_pathkeys),
+ numDistinctRows));
+ }
+ }
+ }
+
+ /*
+ * Now try hash aggregate paths, if enabled and hashing is possible. Since
+ * we're not on the hook to ensure we do our best to create at least one
+ * path here, we treat enable_hashagg as a hard off-switch rather than the
+ * slightly softer variant in create_final_distinct_paths.
+ */
+ if (enable_hashagg && grouping_is_hashable(parse->distinctClause))
+ {
+ add_partial_path(partial_distinct_rel, (Path *)
+ create_agg_path(root,
+ partial_distinct_rel,
+ cheapest_partial_path,
+ cheapest_partial_path->pathtarget,
+ AGG_HASHED,
+ AGGSPLIT_SIMPLE,
+ parse->distinctClause,
+ NIL,
+ NULL,
+ numDistinctRows));
+ }
+
+ /*
+ * If there is an FDW that's responsible for all baserels of the query,
+ * let it consider adding ForeignPaths.
+ */
+ if (partial_distinct_rel->fdwroutine &&
+ partial_distinct_rel->fdwroutine->GetForeignUpperPaths)
+ partial_distinct_rel->fdwroutine->GetForeignUpperPaths(root,
+ UPPERREL_PARTIAL_DISTINCT,
+ input_rel,
+ partial_distinct_rel,
+ NULL);
+
+ /* Let extensions possibly add some more partial paths */
+ if (create_upper_paths_hook)
+ (*create_upper_paths_hook) (root, UPPERREL_PARTIAL_DISTINCT,
+ input_rel, partial_distinct_rel, NULL);
+
+ if (partial_distinct_rel->partial_pathlist != NIL)
+ {
+ generate_gather_paths(root, partial_distinct_rel, true);
+ set_cheapest(partial_distinct_rel);
+
+ /*
+ * Finally, create paths to distinctify the final result. This step
+ * is needed to remove any duplicates due to combining rows from
+ * parallel workers.
+ */
+ create_final_distinct_paths(root, partial_distinct_rel,
+ final_distinct_rel);
+ }
+}
+
+/*
+ * create_final_distinct_paths
+ * Create distinct paths in 'distinct_rel' based on 'input_rel' pathlist
+ *
+ * input_rel: contains the source-data paths
+ * distinct_rel: destination relation for storing created paths
+ */
+static RelOptInfo *
+create_final_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel,
+ RelOptInfo *distinct_rel)
+{
+ Query *parse = root->parse;
+ Path *cheapest_input_path = input_rel->cheapest_total_path;
+ double numDistinctRows;
+ bool allow_hash;
+ Path *path;
+ ListCell *lc;
+
+ /* Estimate number of distinct rows there will be */
+ if (parse->groupClause || parse->groupingSets || parse->hasAggs ||
+ root->hasHavingQual)
+ {
+ /*
+ * If there was grouping or aggregation, use the number of input rows
+ * as the estimated number of DISTINCT rows (ie, assume the input is
+ * already mostly unique).
+ */
+ numDistinctRows = cheapest_input_path->rows;
+ }
+ else
+ {
+ /*
+ * Otherwise, the UNIQUE filter has effects comparable to GROUP BY.
+ */
+ List *distinctExprs;
+
+ distinctExprs = get_sortgrouplist_exprs(parse->distinctClause,
+ parse->targetList);
+ numDistinctRows = estimate_num_groups(root, distinctExprs,
+ cheapest_input_path->rows,
+ NULL, NULL);
+ }
+
+ /*
+ * Consider sort-based implementations of DISTINCT, if possible.
+ */
+ if (grouping_is_sortable(parse->distinctClause))
+ {
+ /*
+ * First, if we have any adequately-presorted paths, just stick a
+ * Unique node on those. Then consider doing an explicit sort of the
+ * cheapest input path and Unique'ing that.
+ *
+ * When we have DISTINCT ON, we must sort by the more rigorous of
+ * DISTINCT and ORDER BY, else it won't have the desired behavior.
+ * Also, if we do have to do an explicit sort, we might as well use
+ * the more rigorous ordering to avoid a second sort later. (Note
+ * that the parser will have ensured that one clause is a prefix of
+ * the other.)
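+ *
+ * For example, in SELECT DISTINCT ON (a) a, b FROM tab ORDER BY a, b,
+ * the sort_pathkeys (a, b) are the more rigorous set, so we sort by
+ * (a, b) rather than by just (a).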
+ */
+ List *needed_pathkeys;
+
+ if (parse->hasDistinctOn &&
+ list_length(root->distinct_pathkeys) <
+ list_length(root->sort_pathkeys))
+ needed_pathkeys = root->sort_pathkeys;
+ else
+ needed_pathkeys = root->distinct_pathkeys;
+
+ foreach(lc, input_rel->pathlist)
+ {
+ Path *path = (Path *) lfirst(lc);
+
+ if (pathkeys_contained_in(needed_pathkeys, path->pathkeys))
+ {
+ add_path(distinct_rel, (Path *)
+ create_upper_unique_path(root, distinct_rel,
+ path,
+ list_length(root->distinct_pathkeys),
+ numDistinctRows));
+ }
+ }
+
+ /* For explicit-sort case, always use the more rigorous clause */
+ if (list_length(root->distinct_pathkeys) <
+ list_length(root->sort_pathkeys))
+ {
+ needed_pathkeys = root->sort_pathkeys;
+ /* Assert checks that parser didn't mess up... */
+ Assert(pathkeys_contained_in(root->distinct_pathkeys,
+ needed_pathkeys));
+ }
+ else
+ needed_pathkeys = root->distinct_pathkeys;
+
+ path = cheapest_input_path;
+ if (!pathkeys_contained_in(needed_pathkeys, path->pathkeys))
+ path = (Path *) create_sort_path(root, distinct_rel,
+ path,
+ needed_pathkeys,
+ -1.0);
+
+ add_path(distinct_rel, (Path *)
+ create_upper_unique_path(root, distinct_rel,
+ path,
+ list_length(root->distinct_pathkeys),
+ numDistinctRows));
+ }
+
+ /*
+ * Consider hash-based implementations of DISTINCT, if possible.
+ *
+ * If we were not able to make any other types of path, we *must* hash or
+ * die trying. If we do have other choices, there are two things that
+ * should prevent selection of hashing: if the query uses DISTINCT ON
+ * (because it won't really have the expected behavior if we hash), or if
+ * enable_hashagg is off.
+ *
+ * Note: grouping_is_hashable() is much more expensive to check than the
+ * other gating conditions, so we want to do it last.
+ */
+ if (distinct_rel->pathlist == NIL)
+ allow_hash = true; /* we have no alternatives */
+ else if (parse->hasDistinctOn || !enable_hashagg)
+ allow_hash = false; /* policy-based decision not to hash */
+ else
+ allow_hash = true; /* default */
+
+ if (allow_hash && grouping_is_hashable(parse->distinctClause))
+ {
+ /* Generate hashed aggregate path --- no sort needed */
+ add_path(distinct_rel, (Path *)
+ create_agg_path(root,
+ distinct_rel,
+ cheapest_input_path,
+ cheapest_input_path->pathtarget,
+ AGG_HASHED,
+ AGGSPLIT_SIMPLE,
+ parse->distinctClause,
+ NIL,
+ NULL,
+ numDistinctRows));
+ }
+
+ return distinct_rel;
+}
+
+/*
+ * create_ordered_paths
+ *
+ * Build a new upperrel containing Paths for ORDER BY evaluation.
+ *
+ * All paths in the result must satisfy the ORDER BY ordering.
+ * The only new paths we need consider are an explicit full sort
+ * and incremental sort on the cheapest-total existing path.
+ *
+ * input_rel: contains the source-data Paths
+ * target: the output tlist the result Paths must emit
+ * limit_tuples: estimated bound on the number of output tuples,
+ * or -1 if no LIMIT or couldn't estimate
+ *
+ * XXX This only looks at sort_pathkeys. I wonder if it needs to look at the
+ * other pathkeys (grouping, ...) like generate_useful_gather_paths.
+ */
+static RelOptInfo *
+create_ordered_paths(PlannerInfo *root,
+ RelOptInfo *input_rel,
+ PathTarget *target,
+ bool target_parallel_safe,
+ double limit_tuples)
+{
+ Path *cheapest_input_path = input_rel->cheapest_total_path;
+ RelOptInfo *ordered_rel;
+ ListCell *lc;
+
+ /* For now, do all work in the (ORDERED, NULL) upperrel */
+ ordered_rel = fetch_upper_rel(root, UPPERREL_ORDERED, NULL);
+
+ /*
+ * If the input relation is not parallel-safe, then the ordered relation
+ * can't be parallel-safe, either. Otherwise, it's parallel-safe if the
+ * target list is parallel-safe.
+ */
+ if (input_rel->consider_parallel && target_parallel_safe)
+ ordered_rel->consider_parallel = true;
+
+ /*
+ * If the input rel belongs to a single FDW, so does the ordered_rel.
+ */
+ ordered_rel->serverid = input_rel->serverid;
+ ordered_rel->userid = input_rel->userid;
+ ordered_rel->useridiscurrent = input_rel->useridiscurrent;
+ ordered_rel->fdwroutine = input_rel->fdwroutine;
+
+ foreach(lc, input_rel->pathlist)
+ {
+ Path *input_path = (Path *) lfirst(lc);
+ Path *sorted_path = input_path;
+ bool is_sorted;
+ int presorted_keys;
+
+ is_sorted = pathkeys_count_contained_in(root->sort_pathkeys,
+ input_path->pathkeys, &presorted_keys);
+
+ if (is_sorted)
+ {
+ /* Use the input path as is, but add a projection step if needed */
+ if (sorted_path->pathtarget != target)
+ sorted_path = apply_projection_to_path(root, ordered_rel,
+ sorted_path, target);
+
+ add_path(ordered_rel, sorted_path);
+ }
+ else
+ {
+ /*
+ * Try adding an explicit sort, but only to the cheapest total
+ * path since a full sort should generally add the same cost to
+ * all paths.
+ */
+ if (input_path == cheapest_input_path)
+ {
+ /*
+ * Sort the cheapest input path. An explicit sort here can
+ * take advantage of LIMIT.
+ */
+ sorted_path = (Path *) create_sort_path(root,
+ ordered_rel,
+ input_path,
+ root->sort_pathkeys,
+ limit_tuples);
+ /* Add projection step if needed */
+ if (sorted_path->pathtarget != target)
+ sorted_path = apply_projection_to_path(root, ordered_rel,
+ sorted_path, target);
+
+ add_path(ordered_rel, sorted_path);
+ }
+
+ /*
+ * If incremental sort is enabled, then try it as well. Unlike
+ * with regular sorts, we can't just look at the cheapest path,
+ * because the cost of incremental sort depends on how well
+ * presorted the path is. Additionally, incremental sort may enable
+ * a cheaper startup path to win out despite higher total cost.
+ */
+ if (!enable_incremental_sort)
+ continue;
+
+ /* Likewise, if the path can't be used for incremental sort. */
+ if (!presorted_keys)
+ continue;
+
+ /* Also consider incremental sort. */
+ sorted_path = (Path *) create_incremental_sort_path(root,
+ ordered_rel,
+ input_path,
+ root->sort_pathkeys,
+ presorted_keys,
+ limit_tuples);
+
+ /* Add projection step if needed */
+ if (sorted_path->pathtarget != target)
+ sorted_path = apply_projection_to_path(root, ordered_rel,
+ sorted_path, target);
+
+ add_path(ordered_rel, sorted_path);
+ }
+ }
+
+ /*
+ * generate_gather_paths() will have already generated a simple Gather
+ * path for the best parallel path, if any, and the loop above will have
+ * considered sorting it. Similarly, generate_gather_paths() will also
+ * have generated order-preserving Gather Merge plans which can be used
+ * without sorting if they happen to match the sort_pathkeys, and the loop
+ * above will have handled those as well. However, there's one more
+ * possibility: it may make sense to sort the cheapest partial path
+ * according to the required output order and then use Gather Merge.
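+ *
+ * That is, a path of the shape Gather Merge -> Sort -> cheapest partial
+ * path, which the code below builds.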
+ */
+ if (ordered_rel->consider_parallel && root->sort_pathkeys != NIL &&
+ input_rel->partial_pathlist != NIL)
+ {
+ Path *cheapest_partial_path;
+
+ cheapest_partial_path = linitial(input_rel->partial_pathlist);
+
+ /*
+ * If cheapest partial path doesn't need a sort, this is redundant
+ * with what's already been tried.
+ */
+ if (!pathkeys_contained_in(root->sort_pathkeys,
+ cheapest_partial_path->pathkeys))
+ {
+ Path *path;
+ double total_groups;
+
+ path = (Path *) create_sort_path(root,
+ ordered_rel,
+ cheapest_partial_path,
+ root->sort_pathkeys,
+ limit_tuples);
+
+ total_groups = cheapest_partial_path->rows *
+ cheapest_partial_path->parallel_workers;
+ path = (Path *)
+ create_gather_merge_path(root, ordered_rel,
+ path,
+ path->pathtarget,
+ root->sort_pathkeys, NULL,
+ &total_groups);
+
+ /* Add projection step if needed */
+ if (path->pathtarget != target)
+ path = apply_projection_to_path(root, ordered_rel,
+ path, target);
+
+ add_path(ordered_rel, path);
+ }
+
+ /*
+ * Consider incremental sort with a gather merge on partial paths.
+ *
+ * We can also skip the entire loop when we only have a single-item
+ * sort_pathkeys because then we can't possibly have a presorted
+ * prefix of the list without having the list be fully sorted.
+ */
+ if (enable_incremental_sort && list_length(root->sort_pathkeys) > 1)
+ {
+ ListCell *lc;
+
+ foreach(lc, input_rel->partial_pathlist)
+ {
+ Path *input_path = (Path *) lfirst(lc);
+ Path *sorted_path;
+ bool is_sorted;
+ int presorted_keys;
+ double total_groups;
+
+ /*
+ * We don't care if this is the cheapest partial path - we
+ * can't simply skip it, because it may be partially sorted, in
+ * which case we want to consider adding incremental sort
+ * (instead of full sort, which is what happens above).
+ */
+
+ is_sorted = pathkeys_count_contained_in(root->sort_pathkeys,
+ input_path->pathkeys,
+ &presorted_keys);
+
+ /* No point in adding incremental sort on fully sorted paths. */
+ if (is_sorted)
+ continue;
+
+ if (presorted_keys == 0)
+ continue;
+
+ /* Since we have presorted keys, consider incremental sort. */
+ sorted_path = (Path *) create_incremental_sort_path(root,
+ ordered_rel,
+ input_path,
+ root->sort_pathkeys,
+ presorted_keys,
+ limit_tuples);
+ total_groups = input_path->rows *
+ input_path->parallel_workers;
+ sorted_path = (Path *)
+ create_gather_merge_path(root, ordered_rel,
+ sorted_path,
+ sorted_path->pathtarget,
+ root->sort_pathkeys, NULL,
+ &total_groups);
+
+ /* Add projection step if needed */
+ if (sorted_path->pathtarget != target)
+ sorted_path = apply_projection_to_path(root, ordered_rel,
+ sorted_path, target);
+
+ add_path(ordered_rel, sorted_path);
+ }
+ }
+ }
+
+ /*
+ * If there is an FDW that's responsible for all baserels of the query,
+ * let it consider adding ForeignPaths.
+ */
+ if (ordered_rel->fdwroutine &&
+ ordered_rel->fdwroutine->GetForeignUpperPaths)
+ ordered_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_ORDERED,
+ input_rel, ordered_rel,
+ NULL);
+
+ /* Let extensions possibly add some more paths */
+ if (create_upper_paths_hook)
+ (*create_upper_paths_hook) (root, UPPERREL_ORDERED,
+ input_rel, ordered_rel, NULL);
+
+ /*
+ * No need to bother with set_cheapest here; grouping_planner does not
+ * need us to do it.
+ */
+ Assert(ordered_rel->pathlist != NIL);
+
+ return ordered_rel;
+}
+
+
+/*
+ * make_group_input_target
+ * Generate appropriate PathTarget for initial input to grouping nodes.
+ *
+ * If there is grouping or aggregation, the scan/join subplan cannot emit
+ * the query's final targetlist; for example, it certainly can't emit any
+ * aggregate function calls. This routine generates the correct target
+ * for the scan/join subplan.
+ *
+ * The query target list passed from the parser already contains entries
+ * for all ORDER BY and GROUP BY expressions, but it will not have entries
+ * for variables used only in HAVING clauses; so we need to add those
+ * variables to the subplan target list. Also, we flatten all expressions
+ * except GROUP BY items into their component variables; other expressions
+ * will be computed by the upper plan nodes rather than by the subplan.
+ * For example, given a query like
+ * SELECT a+b,SUM(c+d) FROM table GROUP BY a+b;
+ * we want to pass this targetlist to the subplan:
+ * a+b,c,d
+ * where the a+b target will be used by the Sort/Group steps, and the
+ * other targets will be used for computing the final results.
+ *
+ * 'final_target' is the query's final target list (in PathTarget form)
+ *
+ * The result is the PathTarget to be computed by the Paths returned from
+ * query_planner().
+ */
+static PathTarget *
+make_group_input_target(PlannerInfo *root, PathTarget *final_target)
+{
+ Query *parse = root->parse;
+ PathTarget *input_target;
+ List *non_group_cols;
+ List *non_group_vars;
+ int i;
+ ListCell *lc;
+
+ /*
+ * We must build a target containing all grouping columns, plus any other
+ * Vars mentioned in the query's targetlist and HAVING qual.
+ */
+ input_target = create_empty_pathtarget();
+ non_group_cols = NIL;
+
+ i = 0;
+ foreach(lc, final_target->exprs)
+ {
+ Expr *expr = (Expr *) lfirst(lc);
+ Index sgref = get_pathtarget_sortgroupref(final_target, i);
+
+ if (sgref && parse->groupClause &&
+ get_sortgroupref_clause_noerr(sgref, parse->groupClause) != NULL)
+ {
+ /*
+ * It's a grouping column, so add it to the input target as-is.
+ */
+ add_column_to_pathtarget(input_target, expr, sgref);
+ }
+ else
+ {
+ /*
+ * Non-grouping column, so just remember the expression for later
+ * call to pull_var_clause.
+ */
+ non_group_cols = lappend(non_group_cols, expr);
+ }
+
+ i++;
+ }
+
+ /*
+ * If there's a HAVING clause, we'll need the Vars it uses, too.
+ */
+ if (parse->havingQual)
+ non_group_cols = lappend(non_group_cols, parse->havingQual);
+
+ /*
+ * Pull out all the Vars mentioned in non-group cols (plus HAVING), and
+ * add them to the input target if not already present. (A Var used
+ * directly as a GROUP BY item will be present already.) Note this
+ * includes Vars used in resjunk items, so we are covering the needs of
+ * ORDER BY and window specifications. Vars used within Aggrefs and
+ * WindowFuncs will be pulled out here, too.
+ */
+ non_group_vars = pull_var_clause((Node *) non_group_cols,
+ PVC_RECURSE_AGGREGATES |
+ PVC_RECURSE_WINDOWFUNCS |
+ PVC_INCLUDE_PLACEHOLDERS);
+ add_new_columns_to_pathtarget(input_target, non_group_vars);
+
+ /* clean up cruft */
+ list_free(non_group_vars);
+ list_free(non_group_cols);
+
+ /* XXX this causes some redundant cost calculation ... */
+ return set_pathtarget_cost_width(root, input_target);
+}
+
+/*
+ * make_partial_grouping_target
+ * Generate appropriate PathTarget for output of partial aggregate
+ * (or partial grouping, if there are no aggregates) nodes.
+ *
+ * A partial aggregation node needs to emit all the same aggregates that
+ * a regular aggregation node would, plus any aggregates used in HAVING;
+ * except that the Aggref nodes should be marked as partial aggregates.
+ *
+ * In addition, we'd better emit any Vars and PlaceHolderVars that are
+ * used outside of Aggrefs in the aggregation tlist and HAVING. (Presumably,
+ * these would be Vars that are grouped by or used in grouping expressions.)
+ *
+ * grouping_target is the tlist to be emitted by the topmost aggregation step.
+ * havingQual represents the HAVING clause.
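+ *
+ * For example, given SELECT a, sum(b) FROM tab GROUP BY a HAVING max(c) > 0,
+ * the partial target contains a, a partial-mode sum(b), and a partial-mode
+ * max(c).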
+ */
+static PathTarget *
+make_partial_grouping_target(PlannerInfo *root,
+ PathTarget *grouping_target,
+ Node *havingQual)
+{
+ Query *parse = root->parse;
+ PathTarget *partial_target;
+ List *non_group_cols;
+ List *non_group_exprs;
+ int i;
+ ListCell *lc;
+
+ partial_target = create_empty_pathtarget();
+ non_group_cols = NIL;
+
+ i = 0;
+ foreach(lc, grouping_target->exprs)
+ {
+ Expr *expr = (Expr *) lfirst(lc);
+ Index sgref = get_pathtarget_sortgroupref(grouping_target, i);
+
+ if (sgref && parse->groupClause &&
+ get_sortgroupref_clause_noerr(sgref, parse->groupClause) != NULL)
+ {
+ /*
+ * It's a grouping column, so add it to the partial_target as-is.
+ * (This allows the upper agg step to repeat the grouping calcs.)
+ */
+ add_column_to_pathtarget(partial_target, expr, sgref);
+ }
+ else
+ {
+ /*
+ * Non-grouping column, so just remember the expression for later
+ * call to pull_var_clause.
+ */
+ non_group_cols = lappend(non_group_cols, expr);
+ }
+
+ i++;
+ }
+
+ /*
+ * If there's a HAVING clause, we'll need the Vars/Aggrefs it uses, too.
+ */
+ if (havingQual)
+ non_group_cols = lappend(non_group_cols, havingQual);
+
+ /*
+ * Pull out all the Vars, PlaceHolderVars, and Aggrefs mentioned in
+ * non-group cols (plus HAVING), and add them to the partial_target if not
+ * already present. (An expression used directly as a GROUP BY item will
+ * be present already.) Note this includes Vars used in resjunk items, so
+ * we are covering the needs of ORDER BY and window specifications.
+ */
+ non_group_exprs = pull_var_clause((Node *) non_group_cols,
+ PVC_INCLUDE_AGGREGATES |
+ PVC_RECURSE_WINDOWFUNCS |
+ PVC_INCLUDE_PLACEHOLDERS);
+
+ add_new_columns_to_pathtarget(partial_target, non_group_exprs);
+
+ /*
+ * Adjust Aggrefs to put them in partial mode. At this point all Aggrefs
+ * are at the top level of the target list, so we can just scan the list
+ * rather than recursing through the expression trees.
+ */
+ foreach(lc, partial_target->exprs)
+ {
+ Aggref *aggref = (Aggref *) lfirst(lc);
+
+ if (IsA(aggref, Aggref))
+ {
+ Aggref *newaggref;
+
+ /*
+ * We shouldn't need to copy the substructure of the Aggref node,
+ * but flat-copy the node itself to avoid damaging other trees.
+ */
+ newaggref = makeNode(Aggref);
+ memcpy(newaggref, aggref, sizeof(Aggref));
+
+ /* For now, assume serialization is required */
+ mark_partial_aggref(newaggref, AGGSPLIT_INITIAL_SERIAL);
+
+ lfirst(lc) = newaggref;
+ }
+ }
+
+ /* clean up cruft */
+ list_free(non_group_exprs);
+ list_free(non_group_cols);
+
+ /* XXX this causes some redundant cost calculation ... */
+ return set_pathtarget_cost_width(root, partial_target);
+}
+
+/*
+ * mark_partial_aggref
+ * Adjust an Aggref to make it represent a partial-aggregation step.
+ *
+ * The Aggref node is modified in-place; caller must do any copying required.
+ */
+void
+mark_partial_aggref(Aggref *agg, AggSplit aggsplit)
+{
+ /* aggtranstype should be computed by this point */
+ Assert(OidIsValid(agg->aggtranstype));
+ /* ... but aggsplit should still be as the parser left it */
+ Assert(agg->aggsplit == AGGSPLIT_SIMPLE);
+
+ /* Mark the Aggref with the intended partial-aggregation mode */
+ agg->aggsplit = aggsplit;
+
+ /*
+ * Adjust result type if needed. Normally, a partial aggregate returns
+ * the aggregate's transition type; but if that's INTERNAL and we're
+ * serializing, it returns BYTEA instead.
+ */
+ if (DO_AGGSPLIT_SKIPFINAL(aggsplit))
+ {
+ if (agg->aggtranstype == INTERNALOID && DO_AGGSPLIT_SERIALIZE(aggsplit))
+ agg->aggtype = BYTEAOID;
+ else
+ agg->aggtype = agg->aggtranstype;
+ }
+}
+
+/*
+ * postprocess_setop_tlist
+ * Fix up targetlist returned by plan_set_operations().
+ *
+ * We need to transpose sort key info from the orig_tlist into new_tlist.
+ * NOTE: this would not be good enough if we supported resjunk sort keys
+ * for results of set operations --- then, we'd need to project a whole
+ * new tlist to evaluate the resjunk columns. For now, just ereport if we
+ * find any resjunk columns in orig_tlist.
+ */
+static List *
+postprocess_setop_tlist(List *new_tlist, List *orig_tlist)
+{
+ ListCell *l;
+ ListCell *orig_tlist_item = list_head(orig_tlist);
+
+ foreach(l, new_tlist)
+ {
+ TargetEntry *new_tle = lfirst_node(TargetEntry, l);
+ TargetEntry *orig_tle;
+
+ /* ignore resjunk columns in setop result */
+ if (new_tle->resjunk)
+ continue;
+
+ Assert(orig_tlist_item != NULL);
+ orig_tle = lfirst_node(TargetEntry, orig_tlist_item);
+ orig_tlist_item = lnext(orig_tlist, orig_tlist_item);
+ if (orig_tle->resjunk) /* should not happen */
+ elog(ERROR, "resjunk output columns are not implemented");
+ Assert(new_tle->resno == orig_tle->resno);
+ new_tle->ressortgroupref = orig_tle->ressortgroupref;
+ }
+ if (orig_tlist_item != NULL)
+ elog(ERROR, "resjunk output columns are not implemented");
+ return new_tlist;
+}
+
+/*
+ * select_active_windows
+ * Create a list of the "active" window clauses (ie, those referenced
+ * by non-deleted WindowFuncs) in the order they are to be executed.
+ */
+static List *
+select_active_windows(PlannerInfo *root, WindowFuncLists *wflists)
+{
+ List *windowClause = root->parse->windowClause;
+ List *result = NIL;
+ ListCell *lc;
+ int nActive = 0;
+ WindowClauseSortData *actives = palloc(sizeof(WindowClauseSortData)
+ * list_length(windowClause));
+
+ /* First, construct an array of the active windows */
+ foreach(lc, windowClause)
+ {
+ WindowClause *wc = lfirst_node(WindowClause, lc);
+
+ /* It's only active if wflists shows some related WindowFuncs */
+ Assert(wc->winref <= wflists->maxWinRef);
+ if (wflists->windowFuncs[wc->winref] == NIL)
+ continue;
+
+ actives[nActive].wc = wc; /* original clause */
+
+ /*
+ * For sorting, we want the list of partition keys followed by the
+ * list of sort keys. But pathkeys construction will remove duplicates
+ * between the two, so we can as well (even though we can't detect all
+ * of the duplicates, since some may come from ECs - that might mean
+ * we miss optimization chances here). We must, however, ensure that
+ * the order of entries is preserved with respect to the ones we do
+ * keep.
+ *
+ * partitionClause and orderClause had their own duplicates removed in
+ * parse analysis, so we're only concerned here with removing
+ * orderClause entries that also appear in partitionClause.
+ */
+ actives[nActive].uniqueOrder =
+ list_concat_unique(list_copy(wc->partitionClause),
+ wc->orderClause);
+ nActive++;
+ }
+
+ /*
+ * Sort active windows by their partitioning/ordering clauses, ignoring
+ * any framing clauses, so that the windows that need the same sorting are
+ * adjacent in the list. When we come to generate paths, this will avoid
+ * inserting additional Sort nodes.
+ *
+ * This is how we implement a specific requirement from the SQL standard,
+ * which says that when two or more windows are order-equivalent (i.e.
+ * have matching partition and order clauses, even if their names or
+ * framing clauses differ), then all peer rows must be presented in the
+ * same order in all of them. If we allowed multiple sort nodes for such
+ * cases, we'd risk having the peer rows end up in different orders in
+ * equivalent windows due to sort instability. (See General Rule 4 of
+ * <window clause> in SQL2008 - SQL2016.)
+ *
+ * Additionally, if the entire list of clauses of one window is a prefix
+ * of another, put first the window with stronger sorting requirements.
+	 * This way we will first sort for the stronger window, and won't have
+	 * to sort again for the weaker one.
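+	 *
+	 * For instance, if one window uses (PARTITION BY a ORDER BY b) and
+	 * another uses just (PARTITION BY a), the (a, b) window sorts first and
+	 * the (a)-only window can then reuse that ordering without an
+	 * additional Sort.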
+ */
+ qsort(actives, nActive, sizeof(WindowClauseSortData), common_prefix_cmp);
+
+ /* build ordered list of the original WindowClause nodes */
+ for (int i = 0; i < nActive; i++)
+ result = lappend(result, actives[i].wc);
+
+ pfree(actives);
+
+ return result;
+}
+
+/*
+ * common_prefix_cmp
+ * QSort comparison function for WindowClauseSortData
+ *
+ * Sort the windows by the required sorting clauses. First, compare the sort
+ * clauses themselves. Second, if one window's clauses are a prefix of another
+ * one's clauses, put the window with more sort clauses first.
+ */
+static int
+common_prefix_cmp(const void *a, const void *b)
+{
+ const WindowClauseSortData *wcsa = a;
+ const WindowClauseSortData *wcsb = b;
+ ListCell *item_a;
+ ListCell *item_b;
+
+ forboth(item_a, wcsa->uniqueOrder, item_b, wcsb->uniqueOrder)
+ {
+ SortGroupClause *sca = lfirst_node(SortGroupClause, item_a);
+ SortGroupClause *scb = lfirst_node(SortGroupClause, item_b);
+
+ if (sca->tleSortGroupRef > scb->tleSortGroupRef)
+ return -1;
+ else if (sca->tleSortGroupRef < scb->tleSortGroupRef)
+ return 1;
+ else if (sca->sortop > scb->sortop)
+ return -1;
+ else if (sca->sortop < scb->sortop)
+ return 1;
+ else if (sca->nulls_first && !scb->nulls_first)
+ return -1;
+ else if (!sca->nulls_first && scb->nulls_first)
+ return 1;
+ /* no need to compare eqop, since it is fully determined by sortop */
+ }
+
+ if (list_length(wcsa->uniqueOrder) > list_length(wcsb->uniqueOrder))
+ return -1;
+ else if (list_length(wcsa->uniqueOrder) < list_length(wcsb->uniqueOrder))
+ return 1;
+
+ return 0;
+}
+
+/*
+ * make_window_input_target
+ * Generate appropriate PathTarget for initial input to WindowAgg nodes.
+ *
+ * When the query has window functions, this function computes the desired
+ * target to be computed by the node just below the first WindowAgg.
+ * This tlist must contain all values needed to evaluate the window functions,
+ * compute the final target list, and perform any required final sort step.
+ * If multiple WindowAggs are needed, each intermediate one adds its window
+ * function results onto this base tlist; only the topmost WindowAgg computes
+ * the actual desired target list.
+ *
+ * This function is much like make_group_input_target, though not quite enough
+ * like it to share code. As in that function, we flatten most expressions
+ * into their component variables. But we do not want to flatten window
+ * PARTITION BY/ORDER BY clauses, since that might result in multiple
+ * evaluations of them, which would be bad (possibly even resulting in
+ * inconsistent answers, if they contain volatile functions).
+ * Also, we must not flatten GROUP BY clauses that were left unflattened by
+ * make_group_input_target, because we may no longer have access to the
+ * individual Vars in them.
+ *
+ * Another key difference from make_group_input_target is that we don't
+ * flatten Aggref expressions, since those are to be computed below the
+ * window functions and just referenced like Vars above that.
+ *
+ * 'final_target' is the query's final target list (in PathTarget form)
+ * 'activeWindows' is the list of active windows previously identified by
+ * select_active_windows.
+ *
+ * The result is the PathTarget to be computed by the plan node immediately
+ * below the first WindowAgg node.
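+ *
+ * For example, for
+ *		SELECT sum(x) OVER (ORDER BY y + 1), z + 1 FROM tab
+ * the result keeps y + 1 whole (it is a window ORDER BY expression) but
+ * reduces the other columns to the plain Vars x and z; the WindowAgg node
+ * then computes sum(x) OVER (...) and the final projection computes z + 1.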
+ */
+static PathTarget *
+make_window_input_target(PlannerInfo *root,
+ PathTarget *final_target,
+ List *activeWindows)
+{
+ Query *parse = root->parse;
+ PathTarget *input_target;
+ Bitmapset *sgrefs;
+ List *flattenable_cols;
+ List *flattenable_vars;
+ int i;
+ ListCell *lc;
+
+ Assert(parse->hasWindowFuncs);
+
+ /*
+ * Collect the sortgroupref numbers of window PARTITION/ORDER BY clauses
+ * into a bitmapset for convenient reference below.
+ */
+ sgrefs = NULL;
+ foreach(lc, activeWindows)
+ {
+ WindowClause *wc = lfirst_node(WindowClause, lc);
+ ListCell *lc2;
+
+ foreach(lc2, wc->partitionClause)
+ {
+ SortGroupClause *sortcl = lfirst_node(SortGroupClause, lc2);
+
+ sgrefs = bms_add_member(sgrefs, sortcl->tleSortGroupRef);
+ }
+ foreach(lc2, wc->orderClause)
+ {
+ SortGroupClause *sortcl = lfirst_node(SortGroupClause, lc2);
+
+ sgrefs = bms_add_member(sgrefs, sortcl->tleSortGroupRef);
+ }
+ }
+
+ /* Add in sortgroupref numbers of GROUP BY clauses, too */
+ foreach(lc, parse->groupClause)
+ {
+ SortGroupClause *grpcl = lfirst_node(SortGroupClause, lc);
+
+ sgrefs = bms_add_member(sgrefs, grpcl->tleSortGroupRef);
+ }
+
+ /*
+ * Construct a target containing all the non-flattenable targetlist items,
+ * and save aside the others for a moment.
+ */
+ input_target = create_empty_pathtarget();
+ flattenable_cols = NIL;
+
+ i = 0;
+ foreach(lc, final_target->exprs)
+ {
+ Expr *expr = (Expr *) lfirst(lc);
+ Index sgref = get_pathtarget_sortgroupref(final_target, i);
+
+ /*
+ * Don't want to deconstruct window clauses or GROUP BY items. (Note
+ * that such items can't contain window functions, so it's okay to
+ * compute them below the WindowAgg nodes.)
+ */
+ if (sgref != 0 && bms_is_member(sgref, sgrefs))
+ {
+ /*
+ * Don't want to deconstruct this value, so add it to the input
+ * target as-is.
+ */
+ add_column_to_pathtarget(input_target, expr, sgref);
+ }
+ else
+ {
+ /*
+ * Column is to be flattened, so just remember the expression for
+ * later call to pull_var_clause.
+ */
+ flattenable_cols = lappend(flattenable_cols, expr);
+ }
+
+ i++;
+ }
+
+ /*
+ * Pull out all the Vars and Aggrefs mentioned in flattenable columns, and
+ * add them to the input target if not already present. (Some might be
+ * there already because they're used directly as window/group clauses.)
+ *
+ * Note: it's essential to use PVC_INCLUDE_AGGREGATES here, so that any
+ * Aggrefs are placed in the Agg node's tlist and not left to be computed
+ * at higher levels. On the other hand, we should recurse into
+ * WindowFuncs to make sure their input expressions are available.
+ */
+ flattenable_vars = pull_var_clause((Node *) flattenable_cols,
+ PVC_INCLUDE_AGGREGATES |
+ PVC_RECURSE_WINDOWFUNCS |
+ PVC_INCLUDE_PLACEHOLDERS);
+ add_new_columns_to_pathtarget(input_target, flattenable_vars);
+
+ /* clean up cruft */
+ list_free(flattenable_vars);
+ list_free(flattenable_cols);
+
+ /* XXX this causes some redundant cost calculation ... */
+ return set_pathtarget_cost_width(root, input_target);
+}
+
+/*
+ * make_pathkeys_for_window
+ * Create a pathkeys list describing the required input ordering
+ * for the given WindowClause.
+ *
+ * The required ordering is first the PARTITION keys, then the ORDER keys.
+ * In the future we might try to implement windowing using hashing, in which
+ * case the ordering could be relaxed, but for now we always sort.
+ */
+static List *
+make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc,
+ List *tlist)
+{
+ List *window_pathkeys;
+ List *window_sortclauses;
+
+ /* Throw error if can't sort */
+ if (!grouping_is_sortable(wc->partitionClause))
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("could not implement window PARTITION BY"),
+ errdetail("Window partitioning columns must be of sortable datatypes.")));
+ if (!grouping_is_sortable(wc->orderClause))
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("could not implement window ORDER BY"),
+ errdetail("Window ordering columns must be of sortable datatypes.")));
+
+ /* Okay, make the combined pathkeys */
+ window_sortclauses = list_concat_copy(wc->partitionClause, wc->orderClause);
+ window_pathkeys = make_pathkeys_for_sortclauses(root,
+ window_sortclauses,
+ tlist);
+ list_free(window_sortclauses);
+ return window_pathkeys;
+}
+
+/*
+ * make_sort_input_target
+ * Generate appropriate PathTarget for initial input to Sort step.
+ *
+ * If the query has ORDER BY, this function chooses the target to be computed
+ * by the node just below the Sort (and DISTINCT, if any, since Unique can't
+ * project) steps. This might or might not be identical to the query's final
+ * output target.
+ *
+ * The main argument for keeping the sort-input tlist the same as the final
+ * target is that we avoid a separate projection node (which will be needed if
+ * they're different, because Sort can't project). However, there are also
+ * advantages to postponing tlist evaluation till after the Sort: it ensures
+ * a consistent order of evaluation for any volatile functions in the tlist,
+ * and if there's also a LIMIT, we can stop the query without ever computing
+ * tlist functions for later rows, which is beneficial for both volatile and
+ * expensive functions.
+ *
+ * Our current policy is to postpone volatile expressions till after the sort
+ * unconditionally (assuming that that's possible, ie they are in plain tlist
+ * columns and not ORDER BY/GROUP BY/DISTINCT columns). We also prefer to
+ * postpone set-returning expressions, because running them beforehand would
+ * bloat the sort dataset, and because it might cause unexpected output order
+ * if the sort isn't stable. However there's a constraint on that: all SRFs
+ * in the tlist should be evaluated at the same plan step, so that they can
+ * run in sync in nodeProjectSet. So if any SRFs are in sort columns, we
+ * mustn't postpone any SRFs. (Note that in principle that policy should
+ * probably get applied to the group/window input targetlists too, but we
+ * have not done that historically.) Lastly, expensive expressions are
+ * postponed if there is a LIMIT, or if root->tuple_fraction shows that
+ * partial evaluation of the query is possible (if neither is true, we expect
+ * to have to evaluate the expressions for every row anyway), or if there are
+ * any volatile or set-returning expressions (since once we've put in a
+ * projection at all, it won't cost any more to postpone more stuff).
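+ *
+ * For example, in
+ *		SELECT x, expensive_fn(x) FROM tab ORDER BY x LIMIT 10
+ * postponing expensive_fn (a stand-in for any function costed above the
+ * 10 * cpu_operator_cost threshold used below) to a projection above the
+ * Sort means it is evaluated only for the ten rows that survive the LIMIT,
+ * not for every row fed into the Sort.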
+ *
+ * Another issue that could potentially be considered here is that
+ * evaluating tlist expressions could result in data that's either wider
+ * or narrower than the input Vars, thus changing the volume of data that
+ * has to go through the Sort. However, we usually have only a very bad
+ * idea of the output width of any expression more complex than a Var,
+ * so for now it seems too risky to try to optimize on that basis.
+ *
+ * Note that if we do produce a modified sort-input target, and then the
+ * query ends up not using an explicit Sort, no particular harm is done:
+ * we'll initially use the modified target for the preceding path nodes,
+ * but then change them to the final target with apply_projection_to_path.
+ * Moreover, in such a case the guarantees about evaluation order of
+ * volatile functions still hold, since the rows are sorted already.
+ *
+ * This function has some things in common with make_group_input_target and
+ * make_window_input_target, though the detailed rules for what to do are
+ * different. We never flatten/postpone any grouping or ordering columns;
+ * those are needed before the sort. If we do flatten a particular
+ * expression, we leave Aggref and WindowFunc nodes alone, since those were
+ * computed earlier.
+ *
+ * 'final_target' is the query's final target list (in PathTarget form)
+ * 'have_postponed_srfs' is an output argument, see below
+ *
+ * The result is the PathTarget to be computed by the plan node immediately
+ * below the Sort step (and the Distinct step, if any). This will be
+ * exactly final_target if we decide a projection step wouldn't be helpful.
+ *
+ * In addition, *have_postponed_srfs is set to true if we choose to postpone
+ * any set-returning functions to after the Sort.
+ */
+static PathTarget *
+make_sort_input_target(PlannerInfo *root,
+ PathTarget *final_target,
+ bool *have_postponed_srfs)
+{
+ Query *parse = root->parse;
+ PathTarget *input_target;
+ int ncols;
+ bool *col_is_srf;
+ bool *postpone_col;
+ bool have_srf;
+ bool have_volatile;
+ bool have_expensive;
+ bool have_srf_sortcols;
+ bool postpone_srfs;
+ List *postponable_cols;
+ List *postponable_vars;
+ int i;
+ ListCell *lc;
+
+ /* Shouldn't get here unless query has ORDER BY */
+ Assert(parse->sortClause);
+
+ *have_postponed_srfs = false; /* default result */
+
+ /* Inspect tlist and collect per-column information */
+ ncols = list_length(final_target->exprs);
+ col_is_srf = (bool *) palloc0(ncols * sizeof(bool));
+ postpone_col = (bool *) palloc0(ncols * sizeof(bool));
+ have_srf = have_volatile = have_expensive = have_srf_sortcols = false;
+
+ i = 0;
+ foreach(lc, final_target->exprs)
+ {
+ Expr *expr = (Expr *) lfirst(lc);
+
+ /*
+ * If the column has a sortgroupref, assume it has to be evaluated
+ * before sorting. Generally such columns would be ORDER BY, GROUP
+ * BY, etc targets. One exception is columns that were removed from
+ * GROUP BY by remove_useless_groupby_columns() ... but those would
+ * only be Vars anyway. There don't seem to be any cases where it
+ * would be worth the trouble to double-check.
+ */
+ if (get_pathtarget_sortgroupref(final_target, i) == 0)
+ {
+ /*
+ * Check for SRF or volatile functions. Check the SRF case first
+ * because we must know whether we have any postponed SRFs.
+ */
+ if (parse->hasTargetSRFs &&
+ expression_returns_set((Node *) expr))
+ {
+ /* We'll decide below whether these are postponable */
+ col_is_srf[i] = true;
+ have_srf = true;
+ }
+ else if (contain_volatile_functions((Node *) expr))
+ {
+ /* Unconditionally postpone */
+ postpone_col[i] = true;
+ have_volatile = true;
+ }
+ else
+ {
+ /*
+ * Else check the cost. XXX it's annoying to have to do this
+ * when set_pathtarget_cost_width() just did it. Refactor to
+ * allow sharing the work?
+ */
+ QualCost cost;
+
+ cost_qual_eval_node(&cost, (Node *) expr, root);
+
+ /*
+ * We arbitrarily define "expensive" as "more than 10X
+ * cpu_operator_cost". Note this will take in any PL function
+ * with default cost.
+ */
+ if (cost.per_tuple > 10 * cpu_operator_cost)
+ {
+ postpone_col[i] = true;
+ have_expensive = true;
+ }
+ }
+ }
+ else
+ {
+ /* For sortgroupref cols, just check if any contain SRFs */
+ if (!have_srf_sortcols &&
+ parse->hasTargetSRFs &&
+ expression_returns_set((Node *) expr))
+ have_srf_sortcols = true;
+ }
+
+ i++;
+ }
+
+ /*
+ * We can postpone SRFs if we have some but none are in sortgroupref cols.
+ */
+ postpone_srfs = (have_srf && !have_srf_sortcols);
+
+ /*
+ * If we don't need a post-sort projection, just return final_target.
+ */
+ if (!(postpone_srfs || have_volatile ||
+ (have_expensive &&
+ (parse->limitCount || root->tuple_fraction > 0))))
+ return final_target;
+
+ /*
+ * Report whether the post-sort projection will contain set-returning
+ * functions. This is important because it affects whether the Sort can
+ * rely on the query's LIMIT (if any) to bound the number of rows it needs
+ * to return.
+ */
+ *have_postponed_srfs = postpone_srfs;
+
+ /*
+ * Construct the sort-input target, taking all non-postponable columns and
+ * then adding Vars, PlaceHolderVars, Aggrefs, and WindowFuncs found in
+ * the postponable ones.
+ */
+ input_target = create_empty_pathtarget();
+ postponable_cols = NIL;
+
+ i = 0;
+ foreach(lc, final_target->exprs)
+ {
+ Expr *expr = (Expr *) lfirst(lc);
+
+ if (postpone_col[i] || (postpone_srfs && col_is_srf[i]))
+ postponable_cols = lappend(postponable_cols, expr);
+ else
+ add_column_to_pathtarget(input_target, expr,
+ get_pathtarget_sortgroupref(final_target, i));
+
+ i++;
+ }
+
+ /*
+ * Pull out all the Vars, Aggrefs, and WindowFuncs mentioned in
+ * postponable columns, and add them to the sort-input target if not
+ * already present. (Some might be there already.) We mustn't
+ * deconstruct Aggrefs or WindowFuncs here, since the projection node
+ * would be unable to recompute them.
+ */
+ postponable_vars = pull_var_clause((Node *) postponable_cols,
+ PVC_INCLUDE_AGGREGATES |
+ PVC_INCLUDE_WINDOWFUNCS |
+ PVC_INCLUDE_PLACEHOLDERS);
+ add_new_columns_to_pathtarget(input_target, postponable_vars);
+
+ /* clean up cruft */
+ list_free(postponable_vars);
+ list_free(postponable_cols);
+
+ /* XXX this represents even more redundant cost calculation ... */
+ return set_pathtarget_cost_width(root, input_target);
+}
+
+/*
+ * get_cheapest_fractional_path
+ * Find the cheapest path for retrieving a specified fraction of all
+ * the tuples expected to be returned by the given relation.
+ *
+ * We interpret tuple_fraction the same way as grouping_planner.
+ *
+ * We assume set_cheapest() has been run on the given rel.
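+ *
+ * For example, tuple_fraction = 100.0 against a path expected to return
+ * 1000 rows is treated as the fraction 0.1, while tuple_fraction <= 0.0
+ * means all tuples will be retrieved and the cheapest-total path wins
+ * outright.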
+ */
+Path *
+get_cheapest_fractional_path(RelOptInfo *rel, double tuple_fraction)
+{
+ Path *best_path = rel->cheapest_total_path;
+ ListCell *l;
+
+ /* If all tuples will be retrieved, just return the cheapest-total path */
+ if (tuple_fraction <= 0.0)
+ return best_path;
+
+ /* Convert absolute # of tuples to a fraction; no need to clamp to 0..1 */
+ if (tuple_fraction >= 1.0 && best_path->rows > 0)
+ tuple_fraction /= best_path->rows;
+
+ foreach(l, rel->pathlist)
+ {
+ Path *path = (Path *) lfirst(l);
+
+ if (path == rel->cheapest_total_path ||
+ compare_fractional_path_costs(best_path, path, tuple_fraction) <= 0)
+ continue;
+
+ best_path = path;
+ }
+
+ return best_path;
+}
+
+/*
+ * adjust_paths_for_srfs
+ * Fix up the Paths of the given upperrel to handle tSRFs properly.
+ *
+ * The executor can only handle set-returning functions that appear at the
+ * top level of the targetlist of a ProjectSet plan node. If we have any SRFs
+ * that are not at top level, we need to split up the evaluation into multiple
+ * plan levels in which each level satisfies this constraint. This function
+ * modifies each Path of an upperrel that (might) compute any SRFs in its
+ * output tlist to insert appropriate projection steps.
+ *
+ * The given targets and targets_contain_srfs lists are from
+ * split_pathtarget_at_srfs(). We assume the existing Paths emit the first
+ * target in targets.
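+ *
+ * For instance, a tlist item such as generate_series(1, 3) + 1 gets split
+ * so that a ProjectSet node evaluates generate_series(1, 3) at top level
+ * and an ordinary projection above it adds the 1.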
+ */
+static void
+adjust_paths_for_srfs(PlannerInfo *root, RelOptInfo *rel,
+ List *targets, List *targets_contain_srfs)
+{
+ ListCell *lc;
+
+ Assert(list_length(targets) == list_length(targets_contain_srfs));
+ Assert(!linitial_int(targets_contain_srfs));
+
+ /* If no SRFs appear at this plan level, nothing to do */
+ if (list_length(targets) == 1)
+ return;
+
+ /*
+ * Stack SRF-evaluation nodes atop each path for the rel.
+ *
+ * In principle we should re-run set_cheapest() here to identify the
+ * cheapest path, but it seems unlikely that adding the same tlist eval
+ * costs to all the paths would change that, so we don't bother. Instead,
+ * just assume that the cheapest-startup and cheapest-total paths remain
+ * so. (There should be no parameterized paths anymore, so we needn't
+ * worry about updating cheapest_parameterized_paths.)
+ */
+ foreach(lc, rel->pathlist)
+ {
+ Path *subpath = (Path *) lfirst(lc);
+ Path *newpath = subpath;
+ ListCell *lc1,
+ *lc2;
+
+ Assert(subpath->param_info == NULL);
+ forboth(lc1, targets, lc2, targets_contain_srfs)
+ {
+ PathTarget *thistarget = lfirst_node(PathTarget, lc1);
+ bool contains_srfs = (bool) lfirst_int(lc2);
+
+ /* If this level doesn't contain SRFs, do regular projection */
+ if (contains_srfs)
+ newpath = (Path *) create_set_projection_path(root,
+ rel,
+ newpath,
+ thistarget);
+ else
+ newpath = (Path *) apply_projection_to_path(root,
+ rel,
+ newpath,
+ thistarget);
+ }
+ lfirst(lc) = newpath;
+ if (subpath == rel->cheapest_startup_path)
+ rel->cheapest_startup_path = newpath;
+ if (subpath == rel->cheapest_total_path)
+ rel->cheapest_total_path = newpath;
+ }
+
+ /* Likewise for partial paths, if any */
+ foreach(lc, rel->partial_pathlist)
+ {
+ Path *subpath = (Path *) lfirst(lc);
+ Path *newpath = subpath;
+ ListCell *lc1,
+ *lc2;
+
+ Assert(subpath->param_info == NULL);
+ forboth(lc1, targets, lc2, targets_contain_srfs)
+ {
+ PathTarget *thistarget = lfirst_node(PathTarget, lc1);
+ bool contains_srfs = (bool) lfirst_int(lc2);
+
+ /* If this level doesn't contain SRFs, do regular projection */
+ if (contains_srfs)
+ newpath = (Path *) create_set_projection_path(root,
+ rel,
+ newpath,
+ thistarget);
+ else
+ {
+ /* avoid apply_projection_to_path, in case of multiple refs */
+ newpath = (Path *) create_projection_path(root,
+ rel,
+ newpath,
+ thistarget);
+ }
+ }
+ lfirst(lc) = newpath;
+ }
+}
+
+/*
+ * expression_planner
+ * Perform planner's transformations on a standalone expression.
+ *
+ * Various utility commands need to evaluate expressions that are not part
+ * of a plannable query. They can do so using the executor's regular
+ * expression-execution machinery, but first the expression has to be fed
+ * through here to transform it from parser output to something executable.
+ *
+ * Currently, we disallow sublinks in standalone expressions, so there's no
+ * real "planning" involved here. (That might not always be true though.)
+ * What we must do is run eval_const_expressions to ensure that any function
+ * calls are converted to positional notation and function default arguments
+ * get inserted. The fact that constant subexpressions get simplified is a
+ * side-effect that is useful when the expression will get evaluated more than
+ * once. Also, we must fix operator function IDs.
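+ *
+ * Typical callers are utility and executor support paths such as the
+ * evaluation of column DEFAULT expressions and CHECK constraints, usually
+ * reaching here via ExecPrepareExpr().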
+ *
+ * This does not return any information about dependencies of the expression.
+ * Hence callers should use the results only for the duration of the current
+ * query. Callers that would like to cache the results for longer should use
+ * expression_planner_with_deps, probably via the plancache.
+ *
+ * Note: this must not make any damaging changes to the passed-in expression
+ * tree. (It would actually be okay to apply fix_opfuncids to it, but since
+ * we first do an expression_tree_mutator-based walk, what is returned will
+ * be a new node tree.) The result is constructed in the current memory
+ * context; beware that this can leak a lot of additional stuff there, too.
+ */
+Expr *
+expression_planner(Expr *expr)
+{
+ Node *result;
+
+ /*
+ * Convert named-argument function calls, insert default arguments and
+ * simplify constant subexprs
+ */
+ result = eval_const_expressions(NULL, (Node *) expr);
+
+ /* Fill in opfuncid values if missing */
+ fix_opfuncids(result);
+
+ return (Expr *) result;
+}
+
+/*
+ * expression_planner_with_deps
+ * Perform planner's transformations on a standalone expression,
+ * returning expression dependency information along with the result.
+ *
+ * This is identical to expression_planner() except that it also returns
+ * information about possible dependencies of the expression, ie identities of
+ * objects whose definitions affect the result. As in a PlannedStmt, these
+ * are expressed as a list of relation Oids and a list of PlanInvalItems.
+ */
+Expr *
+expression_planner_with_deps(Expr *expr,
+ List **relationOids,
+ List **invalItems)
+{
+ Node *result;
+ PlannerGlobal glob;
+ PlannerInfo root;
+
+ /* Make up dummy planner state so we can use setrefs machinery */
+ MemSet(&glob, 0, sizeof(glob));
+ glob.type = T_PlannerGlobal;
+ glob.relationOids = NIL;
+ glob.invalItems = NIL;
+
+ MemSet(&root, 0, sizeof(root));
+ root.type = T_PlannerInfo;
+ root.glob = &glob;
+
+ /*
+ * Convert named-argument function calls, insert default arguments and
+ * simplify constant subexprs. Collect identities of inlined functions
+ * and elided domains, too.
+ */
+ result = eval_const_expressions(&root, (Node *) expr);
+
+ /* Fill in opfuncid values if missing */
+ fix_opfuncids(result);
+
+ /*
+ * Now walk the finished expression to find anything else we ought to
+ * record as an expression dependency.
+ */
+ (void) extract_query_dependencies_walker(result, &root);
+
+ *relationOids = glob.relationOids;
+ *invalItems = glob.invalItems;
+
+ return (Expr *) result;
+}
+
+
+/*
+ * plan_cluster_use_sort
+ * Use the planner to decide how CLUSTER should implement sorting
+ *
+ * tableOid is the OID of a table to be clustered on its index indexOid
+ * (which is already known to be a btree index). Decide whether it's
+ * cheaper to do an indexscan or a seqscan-plus-sort to execute the CLUSTER.
+ * Return true to use sorting, false to use an indexscan.
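+ *
+ * Roughly speaking, seqscan-plus-sort tends to win for large tables whose
+ * physical order is poorly correlated with the index, while the indexscan
+ * tends to win when the heap is already close to index order.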
+ *
+ * Note: caller had better already hold some type of lock on the table.
+ */
+bool
+plan_cluster_use_sort(Oid tableOid, Oid indexOid)
+{
+ PlannerInfo *root;
+ Query *query;
+ PlannerGlobal *glob;
+ RangeTblEntry *rte;
+ RelOptInfo *rel;
+ IndexOptInfo *indexInfo;
+ QualCost indexExprCost;
+ Cost comparisonCost;
+ Path *seqScanPath;
+ Path seqScanAndSortPath;
+ IndexPath *indexScanPath;
+ ListCell *lc;
+
+ /* We can short-circuit the cost comparison if indexscans are disabled */
+ if (!enable_indexscan)
+ return true; /* use sort */
+
+ /* Set up mostly-dummy planner state */
+ query = makeNode(Query);
+ query->commandType = CMD_SELECT;
+
+ glob = makeNode(PlannerGlobal);
+
+ root = makeNode(PlannerInfo);
+ root->parse = query;
+ root->glob = glob;
+ root->query_level = 1;
+ root->planner_cxt = CurrentMemoryContext;
+ root->wt_param_id = -1;
+
+ /* Build a minimal RTE for the rel */
+ rte = makeNode(RangeTblEntry);
+ rte->rtekind = RTE_RELATION;
+ rte->relid = tableOid;
+ rte->relkind = RELKIND_RELATION; /* Don't be too picky. */
+ rte->rellockmode = AccessShareLock;
+ rte->lateral = false;
+ rte->inh = false;
+ rte->inFromCl = true;
+ query->rtable = list_make1(rte);
+
+ /* Set up RTE/RelOptInfo arrays */
+ setup_simple_rel_arrays(root);
+
+ /* Build RelOptInfo */
+ rel = build_simple_rel(root, 1, NULL);
+
+ /* Locate IndexOptInfo for the target index */
+ indexInfo = NULL;
+ foreach(lc, rel->indexlist)
+ {
+ indexInfo = lfirst_node(IndexOptInfo, lc);
+ if (indexInfo->indexoid == indexOid)
+ break;
+ }
+
+ /*
+ * It's possible that get_relation_info did not generate an IndexOptInfo
+ * for the desired index; this could happen if it's not yet reached its
+ * indcheckxmin usability horizon, or if it's a system index and we're
+ * ignoring system indexes. In such cases we should tell CLUSTER to not
+ * trust the index contents but use seqscan-and-sort.
+ */
+ if (lc == NULL) /* not in the list? */
+ return true; /* use sort */
+
+ /*
+ * Rather than doing all the pushups that would be needed to use
+ * set_baserel_size_estimates, just do a quick hack for rows and width.
+ */
+ rel->rows = rel->tuples;
+ rel->reltarget->width = get_relation_data_width(tableOid, NULL);
+
+ root->total_table_pages = rel->pages;
+
+ /*
+ * Determine eval cost of the index expressions, if any. We need to
+ * charge twice that amount for each tuple comparison that happens during
+ * the sort, since tuplesort.c will have to re-evaluate the index
+ * expressions each time. (XXX that's pretty inefficient...)
+ */
+ cost_qual_eval(&indexExprCost, indexInfo->indexprs, root);
+ comparisonCost = 2.0 * (indexExprCost.startup + indexExprCost.per_tuple);
+
+ /* Estimate the cost of seq scan + sort */
+ seqScanPath = create_seqscan_path(root, rel, NULL, 0);
+ cost_sort(&seqScanAndSortPath, root, NIL,
+ seqScanPath->total_cost, rel->tuples, rel->reltarget->width,
+ comparisonCost, maintenance_work_mem, -1.0);
+
+ /* Estimate the cost of index scan */
+ indexScanPath = create_index_path(root, indexInfo,
+ NIL, NIL, NIL, NIL,
+ ForwardScanDirection, false,
+ NULL, 1.0, false);
+
+ return (seqScanAndSortPath.total_cost < indexScanPath->path.total_cost);
+}
+
+/*
+ * plan_create_index_workers
+ * Use the planner to decide how many parallel worker processes
+ * CREATE INDEX should request for use
+ *
+ * tableOid is the table on which the index is to be built. indexOid is the
+ * OID of an index to be created or reindexed (which must be a btree index).
+ *
+ * Return value is the number of parallel worker processes to request. It
+ * may be unsafe to proceed if this is 0. Note that this does not include the
+ * leader participating as a worker (value is always a number of parallel
+ * worker processes).
+ *
+ * Note: caller had better already hold some type of lock on the table and
+ * index.
+ */
+int
+plan_create_index_workers(Oid tableOid, Oid indexOid)
+{
+ PlannerInfo *root;
+ Query *query;
+ PlannerGlobal *glob;
+ RangeTblEntry *rte;
+ Relation heap;
+ Relation index;
+ RelOptInfo *rel;
+ int parallel_workers;
+ BlockNumber heap_blocks;
+ double reltuples;
+ double allvisfrac;
+
+ /*
+ * We don't allow performing parallel operation in standalone backend or
+ * when parallelism is disabled.
+ */
+ if (!IsUnderPostmaster || max_parallel_maintenance_workers == 0)
+ return 0;
+
+ /* Set up largely-dummy planner state */
+ query = makeNode(Query);
+ query->commandType = CMD_SELECT;
+
+ glob = makeNode(PlannerGlobal);
+
+ root = makeNode(PlannerInfo);
+ root->parse = query;
+ root->glob = glob;
+ root->query_level = 1;
+ root->planner_cxt = CurrentMemoryContext;
+ root->wt_param_id = -1;
+
+ /*
+ * Build a minimal RTE.
+ *
+ * Mark the RTE with inh = true. This is a kludge to prevent
+ * get_relation_info() from fetching index info, which is necessary
+ * because it does not expect that any IndexOptInfo is currently
+ * undergoing REINDEX.
+ */
+ rte = makeNode(RangeTblEntry);
+ rte->rtekind = RTE_RELATION;
+ rte->relid = tableOid;
+ rte->relkind = RELKIND_RELATION; /* Don't be too picky. */
+ rte->rellockmode = AccessShareLock;
+ rte->lateral = false;
+ rte->inh = true;
+ rte->inFromCl = true;
+ query->rtable = list_make1(rte);
+
+ /* Set up RTE/RelOptInfo arrays */
+ setup_simple_rel_arrays(root);
+
+ /* Build RelOptInfo */
+ rel = build_simple_rel(root, 1, NULL);
+
+ /* Rels are assumed already locked by the caller */
+ heap = table_open(tableOid, NoLock);
+ index = index_open(indexOid, NoLock);
+
+ /*
+ * Determine if it's safe to proceed.
+ *
+ * Currently, parallel workers can't access the leader's temporary tables.
+ * Furthermore, any index predicate or index expressions must be parallel
+ * safe.
+ */
+ if (heap->rd_rel->relpersistence == RELPERSISTENCE_TEMP ||
+ !is_parallel_safe(root, (Node *) RelationGetIndexExpressions(index)) ||
+ !is_parallel_safe(root, (Node *) RelationGetIndexPredicate(index)))
+ {
+ parallel_workers = 0;
+ goto done;
+ }
+
+ /*
+ * If parallel_workers storage parameter is set for the table, accept that
+ * as the number of parallel worker processes to launch (though still cap
+ * at max_parallel_maintenance_workers). Note that we deliberately do not
+ * consider any other factor when parallel_workers is set. (e.g., memory
+ * use by workers.)
+ */
+ if (rel->rel_parallel_workers != -1)
+ {
+ parallel_workers = Min(rel->rel_parallel_workers,
+ max_parallel_maintenance_workers);
+ goto done;
+ }
+
+ /*
+ * Estimate heap relation size ourselves, since rel->pages cannot be
+ * trusted (heap RTE was marked as inheritance parent)
+ */
+ estimate_rel_size(heap, NULL, &heap_blocks, &reltuples, &allvisfrac);
+
+ /*
+ * Determine number of workers to scan the heap relation using generic
+ * model
+ */
+ parallel_workers = compute_parallel_worker(rel, heap_blocks, -1,
+ max_parallel_maintenance_workers);
+
+ /*
+ * Cap workers based on available maintenance_work_mem as needed.
+ *
+ * Note that each tuplesort participant receives an even share of the
+ * total maintenance_work_mem budget. Aim to leave participants
+ * (including the leader as a participant) with no less than 32MB of
+ * memory. This leaves cases where maintenance_work_mem is set to 64MB
+ * immediately past the threshold of being capable of launching a single
+ * parallel worker to sort.
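+	 *
+	 * For example, at maintenance_work_mem = 64MB the leader plus one worker
+	 * get exactly 32MB apiece, so one worker can be requested; any smaller
+	 * setting drops the request to zero workers.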
+ */
+ while (parallel_workers > 0 &&
+ maintenance_work_mem / (parallel_workers + 1) < 32768L)
+ parallel_workers--;
+
+done:
+ index_close(index, NoLock);
+ table_close(heap, NoLock);
+
+ return parallel_workers;
+}
+
+/*
+ * add_paths_to_grouping_rel
+ *
+ * Add non-partial paths to grouping relation.
+ */
+static void
+add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
+ RelOptInfo *grouped_rel,
+ RelOptInfo *partially_grouped_rel,
+ const AggClauseCosts *agg_costs,
+ grouping_sets_data *gd, double dNumGroups,
+ GroupPathExtraData *extra)
+{
+ Query *parse = root->parse;
+ Path *cheapest_path = input_rel->cheapest_total_path;
+ ListCell *lc;
+ bool can_hash = (extra->flags & GROUPING_CAN_USE_HASH) != 0;
+ bool can_sort = (extra->flags & GROUPING_CAN_USE_SORT) != 0;
+ List *havingQual = (List *) extra->havingQual;
+ AggClauseCosts *agg_final_costs = &extra->agg_final_costs;
+
+ if (can_sort)
+ {
+ /*
+ * Use any available suitably-sorted path as input, and also consider
+ * sorting the cheapest-total path.
+ */
+ foreach(lc, input_rel->pathlist)
+ {
+ Path *path = (Path *) lfirst(lc);
+ Path *path_original = path;
+ bool is_sorted;
+ int presorted_keys;
+
+ is_sorted = pathkeys_count_contained_in(root->group_pathkeys,
+ path->pathkeys,
+ &presorted_keys);
+
+ if (path == cheapest_path || is_sorted)
+ {
+ /* Sort the cheapest-total path if it isn't already sorted */
+ if (!is_sorted)
+ path = (Path *) create_sort_path(root,
+ grouped_rel,
+ path,
+ root->group_pathkeys,
+ -1.0);
+
+ /* Now decide what to stick atop it */
+ if (parse->groupingSets)
+ {
+ consider_groupingsets_paths(root, grouped_rel,
+ path, true, can_hash,
+ gd, agg_costs, dNumGroups);
+ }
+ else if (parse->hasAggs)
+ {
+ /*
+ * We have aggregation, possibly with plain GROUP BY. Make
+ * an AggPath.
+ */
+ add_path(grouped_rel, (Path *)
+ create_agg_path(root,
+ grouped_rel,
+ path,
+ grouped_rel->reltarget,
+ parse->groupClause ? AGG_SORTED : AGG_PLAIN,
+ AGGSPLIT_SIMPLE,
+ parse->groupClause,
+ havingQual,
+ agg_costs,
+ dNumGroups));
+ }
+ else if (parse->groupClause)
+ {
+ /*
+ * We have GROUP BY without aggregation or grouping sets.
+ * Make a GroupPath.
+ */
+ add_path(grouped_rel, (Path *)
+ create_group_path(root,
+ grouped_rel,
+ path,
+ parse->groupClause,
+ havingQual,
+ dNumGroups));
+ }
+ else
+ {
+ /* Other cases should have been handled above */
+ Assert(false);
+ }
+ }
+
+ /*
+ * Now we may consider incremental sort on this path, but only
+ * when the path is not already sorted and when incremental sort
+ * is enabled.
+ */
+ if (is_sorted || !enable_incremental_sort)
+ continue;
+
+ /* Restore the input path (we might have added Sort on top). */
+ path = path_original;
+
+ /* no shared prefix, no point in building incremental sort */
+ if (presorted_keys == 0)
+ continue;
+
+ /*
+ * We should have already excluded pathkeys of length 1 because
+ * then presorted_keys > 0 would imply is_sorted was true.
+ */
+ Assert(list_length(root->group_pathkeys) != 1);
+
+ path = (Path *) create_incremental_sort_path(root,
+ grouped_rel,
+ path,
+ root->group_pathkeys,
+ presorted_keys,
+ -1.0);
+
+ /* Now decide what to stick atop it */
+ if (parse->groupingSets)
+ {
+ consider_groupingsets_paths(root, grouped_rel,
+ path, true, can_hash,
+ gd, agg_costs, dNumGroups);
+ }
+ else if (parse->hasAggs)
+ {
+ /*
+ * We have aggregation, possibly with plain GROUP BY. Make an
+ * AggPath.
+ */
+ add_path(grouped_rel, (Path *)
+ create_agg_path(root,
+ grouped_rel,
+ path,
+ grouped_rel->reltarget,
+ parse->groupClause ? AGG_SORTED : AGG_PLAIN,
+ AGGSPLIT_SIMPLE,
+ parse->groupClause,
+ havingQual,
+ agg_costs,
+ dNumGroups));
+ }
+ else if (parse->groupClause)
+ {
+ /*
+ * We have GROUP BY without aggregation or grouping sets. Make
+ * a GroupPath.
+ */
+ add_path(grouped_rel, (Path *)
+ create_group_path(root,
+ grouped_rel,
+ path,
+ parse->groupClause,
+ havingQual,
+ dNumGroups));
+ }
+ else
+ {
+ /* Other cases should have been handled above */
+ Assert(false);
+ }
+ }
+
+ /*
+ * Instead of operating directly on the input relation, we can
+ * consider finalizing a partially aggregated path.
+ */
+ if (partially_grouped_rel != NULL)
+ {
+ foreach(lc, partially_grouped_rel->pathlist)
+ {
+ Path *path = (Path *) lfirst(lc);
+ Path *path_original = path;
+ bool is_sorted;
+ int presorted_keys;
+
+ is_sorted = pathkeys_count_contained_in(root->group_pathkeys,
+ path->pathkeys,
+ &presorted_keys);
+
+ /*
+ * Insert a Sort node, if required. But there's no point in
+ * sorting anything but the cheapest path.
+ */
+ if (!is_sorted)
+ {
+ if (path != partially_grouped_rel->cheapest_total_path)
+ continue;
+ path = (Path *) create_sort_path(root,
+ grouped_rel,
+ path,
+ root->group_pathkeys,
+ -1.0);
+ }
+
+ if (parse->hasAggs)
+ add_path(grouped_rel, (Path *)
+ create_agg_path(root,
+ grouped_rel,
+ path,
+ grouped_rel->reltarget,
+ parse->groupClause ? AGG_SORTED : AGG_PLAIN,
+ AGGSPLIT_FINAL_DESERIAL,
+ parse->groupClause,
+ havingQual,
+ agg_final_costs,
+ dNumGroups));
+ else
+ add_path(grouped_rel, (Path *)
+ create_group_path(root,
+ grouped_rel,
+ path,
+ parse->groupClause,
+ havingQual,
+ dNumGroups));
+
+ /*
+ * Now we may consider incremental sort on this path, but only
+ * when the path is not already sorted and when incremental
+ * sort is enabled.
+ */
+ if (is_sorted || !enable_incremental_sort)
+ continue;
+
+ /* Restore the input path (we might have added Sort on top). */
+ path = path_original;
+
+				/* no shared prefix, no point in building incremental sort */
+ if (presorted_keys == 0)
+ continue;
+
+ /*
+ * We should have already excluded pathkeys of length 1
+ * because then presorted_keys > 0 would imply is_sorted was
+ * true.
+ */
+ Assert(list_length(root->group_pathkeys) != 1);
+
+ path = (Path *) create_incremental_sort_path(root,
+ grouped_rel,
+ path,
+ root->group_pathkeys,
+ presorted_keys,
+ -1.0);
+
+ if (parse->hasAggs)
+ add_path(grouped_rel, (Path *)
+ create_agg_path(root,
+ grouped_rel,
+ path,
+ grouped_rel->reltarget,
+ parse->groupClause ? AGG_SORTED : AGG_PLAIN,
+ AGGSPLIT_FINAL_DESERIAL,
+ parse->groupClause,
+ havingQual,
+ agg_final_costs,
+ dNumGroups));
+ else
+ add_path(grouped_rel, (Path *)
+ create_group_path(root,
+ grouped_rel,
+ path,
+ parse->groupClause,
+ havingQual,
+ dNumGroups));
+ }
+ }
+ }
+
+ if (can_hash)
+ {
+ if (parse->groupingSets)
+ {
+ /*
+ * Try for a hash-only groupingsets path over unsorted input.
+ */
+ consider_groupingsets_paths(root, grouped_rel,
+ cheapest_path, false, true,
+ gd, agg_costs, dNumGroups);
+ }
+ else
+ {
+ /*
+ * Generate a HashAgg Path. We just need an Agg over the
+ * cheapest-total input path, since input order won't matter.
+ */
+ add_path(grouped_rel, (Path *)
+ create_agg_path(root, grouped_rel,
+ cheapest_path,
+ grouped_rel->reltarget,
+ AGG_HASHED,
+ AGGSPLIT_SIMPLE,
+ parse->groupClause,
+ havingQual,
+ agg_costs,
+ dNumGroups));
+ }
+
+ /*
+		 * Generate a Finalize HashAgg Path atop the cheapest partially
+		 * grouped path, assuming there is one
+ */
+ if (partially_grouped_rel && partially_grouped_rel->pathlist)
+ {
+ Path *path = partially_grouped_rel->cheapest_total_path;
+
+ add_path(grouped_rel, (Path *)
+ create_agg_path(root,
+ grouped_rel,
+ path,
+ grouped_rel->reltarget,
+ AGG_HASHED,
+ AGGSPLIT_FINAL_DESERIAL,
+ parse->groupClause,
+ havingQual,
+ agg_final_costs,
+ dNumGroups));
+ }
+ }
+
+ /*
+ * When partitionwise aggregate is used, we might have fully aggregated
+ * paths in the partial pathlist, because add_paths_to_append_rel() will
+ * consider a path for grouped_rel consisting of a Parallel Append of
+ * non-partial paths from each child.
+ */
+ if (grouped_rel->partial_pathlist != NIL)
+ gather_grouping_paths(root, grouped_rel);
+}
+
+/*
+ * create_partial_grouping_paths
+ *
+ * Create a new upper relation representing the result of partial aggregation
+ * and populate it with appropriate paths. Note that we don't finalize the
+ * lists of paths here, so the caller can add additional partial or non-partial
+ * paths and must afterward call gather_grouping_paths and set_cheapest on
+ * the returned upper relation.
+ *
+ * All paths for this new upper relation -- both partial and non-partial --
+ * have been partially aggregated but require a subsequent FinalizeAggregate
+ * step.
+ *
+ * NB: This function is allowed to return NULL if it determines that there is
+ * no real need to create a new RelOptInfo.
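+ *
+ * A typical use of the paths built here is a two-phase parallel plan such
+ * as
+ *		Finalize GroupAggregate
+ *		  ->  Gather Merge
+ *				->  Partial GroupAggregate
+ *					  ->  Sort
+ *						    ->  Parallel Seq Scan
+ * where the Partial GroupAggregate path belongs to the relation returned
+ * here and the Finalize step is added later by add_paths_to_grouping_rel.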
+ */
+static RelOptInfo *
+create_partial_grouping_paths(PlannerInfo *root,
+ RelOptInfo *grouped_rel,
+ RelOptInfo *input_rel,
+ grouping_sets_data *gd,
+ GroupPathExtraData *extra,
+ bool force_rel_creation)
+{
+ Query *parse = root->parse;
+ RelOptInfo *partially_grouped_rel;
+ AggClauseCosts *agg_partial_costs = &extra->agg_partial_costs;
+ AggClauseCosts *agg_final_costs = &extra->agg_final_costs;
+ Path *cheapest_partial_path = NULL;
+ Path *cheapest_total_path = NULL;
+ double dNumPartialGroups = 0;
+ double dNumPartialPartialGroups = 0;
+ ListCell *lc;
+ bool can_hash = (extra->flags & GROUPING_CAN_USE_HASH) != 0;
+ bool can_sort = (extra->flags & GROUPING_CAN_USE_SORT) != 0;
+
+ /*
+ * Consider whether we should generate partially aggregated non-partial
+ * paths. We can only do this if we have a non-partial path, and only if
+ * the parent of the input rel is performing partial partitionwise
+ * aggregation. (Note that extra->patype is the type of partitionwise
+ * aggregation being used at the parent level, not this level.)
+ */
+ if (input_rel->pathlist != NIL &&
+ extra->patype == PARTITIONWISE_AGGREGATE_PARTIAL)
+ cheapest_total_path = input_rel->cheapest_total_path;
+
+ /*
+ * If parallelism is possible for grouped_rel, then we should consider
+ * generating partially-grouped partial paths. However, if the input rel
+ * has no partial paths, then we can't.
+ */
+ if (grouped_rel->consider_parallel && input_rel->partial_pathlist != NIL)
+ cheapest_partial_path = linitial(input_rel->partial_pathlist);
+
+ /*
+ * If we can't partially aggregate partial paths, and we can't partially
+ * aggregate non-partial paths, then don't bother creating the new
+ * RelOptInfo at all, unless the caller specified force_rel_creation.
+ */
+ if (cheapest_total_path == NULL &&
+ cheapest_partial_path == NULL &&
+ !force_rel_creation)
+ return NULL;
+
+ /*
+ * Build a new upper relation to represent the result of partially
+ * aggregating the rows from the input relation.
+ */
+ partially_grouped_rel = fetch_upper_rel(root,
+ UPPERREL_PARTIAL_GROUP_AGG,
+ grouped_rel->relids);
+ partially_grouped_rel->consider_parallel =
+ grouped_rel->consider_parallel;
+ partially_grouped_rel->reloptkind = grouped_rel->reloptkind;
+ partially_grouped_rel->serverid = grouped_rel->serverid;
+ partially_grouped_rel->userid = grouped_rel->userid;
+ partially_grouped_rel->useridiscurrent = grouped_rel->useridiscurrent;
+ partially_grouped_rel->fdwroutine = grouped_rel->fdwroutine;
+
+ /*
+ * Build target list for partial aggregate paths. These paths cannot just
+ * emit the same tlist as regular aggregate paths, because (1) we must
+ * include Vars and Aggrefs needed in HAVING, which might not appear in
+ * the result tlist, and (2) the Aggrefs must be set in partial mode.
+ */
+ partially_grouped_rel->reltarget =
+ make_partial_grouping_target(root, grouped_rel->reltarget,
+ extra->havingQual);
+
+ if (!extra->partial_costs_set)
+ {
+ /*
+ * Collect statistics about aggregates for estimating costs of
+ * performing aggregation in parallel.
+ */
+ MemSet(agg_partial_costs, 0, sizeof(AggClauseCosts));
+ MemSet(agg_final_costs, 0, sizeof(AggClauseCosts));
+ if (parse->hasAggs)
+ {
+ /* partial phase */
+ get_agg_clause_costs(root, AGGSPLIT_INITIAL_SERIAL,
+ agg_partial_costs);
+
+ /* final phase */
+ get_agg_clause_costs(root, AGGSPLIT_FINAL_DESERIAL,
+ agg_final_costs);
+ }
+
+ extra->partial_costs_set = true;
+ }
+
+ /* Estimate number of partial groups. */
+ if (cheapest_total_path != NULL)
+ dNumPartialGroups =
+ get_number_of_groups(root,
+ cheapest_total_path->rows,
+ gd,
+ extra->targetList);
+ if (cheapest_partial_path != NULL)
+ dNumPartialPartialGroups =
+ get_number_of_groups(root,
+ cheapest_partial_path->rows,
+ gd,
+ extra->targetList);
+
+ if (can_sort && cheapest_total_path != NULL)
+ {
+ /* This should have been checked previously */
+ Assert(parse->hasAggs || parse->groupClause);
+
+ /*
+		 * Use any available suitably-sorted path as input, and also consider
+		 * sorting the cheapest-total path.
+ */
+ foreach(lc, input_rel->pathlist)
+ {
+ Path *path = (Path *) lfirst(lc);
+ bool is_sorted;
+
+ is_sorted = pathkeys_contained_in(root->group_pathkeys,
+ path->pathkeys);
+ if (path == cheapest_total_path || is_sorted)
+ {
+				/* Sort the cheapest-total path, if it isn't already */
+ if (!is_sorted)
+ path = (Path *) create_sort_path(root,
+ partially_grouped_rel,
+ path,
+ root->group_pathkeys,
+ -1.0);
+
+ if (parse->hasAggs)
+ add_path(partially_grouped_rel, (Path *)
+ create_agg_path(root,
+ partially_grouped_rel,
+ path,
+ partially_grouped_rel->reltarget,
+ parse->groupClause ? AGG_SORTED : AGG_PLAIN,
+ AGGSPLIT_INITIAL_SERIAL,
+ parse->groupClause,
+ NIL,
+ agg_partial_costs,
+ dNumPartialGroups));
+ else
+ add_path(partially_grouped_rel, (Path *)
+ create_group_path(root,
+ partially_grouped_rel,
+ path,
+ parse->groupClause,
+ NIL,
+ dNumPartialGroups));
+ }
+ }
+
+ /*
+		 * Consider incremental sort on the input rel's non-partial paths, if enabled.
+ *
+ * We can also skip the entire loop when we only have a single-item
+ * group_pathkeys because then we can't possibly have a presorted
+ * prefix of the list without having the list be fully sorted.
+ */
+ if (enable_incremental_sort && list_length(root->group_pathkeys) > 1)
+ {
+ foreach(lc, input_rel->pathlist)
+ {
+ Path *path = (Path *) lfirst(lc);
+ bool is_sorted;
+ int presorted_keys;
+
+ is_sorted = pathkeys_count_contained_in(root->group_pathkeys,
+ path->pathkeys,
+ &presorted_keys);
+
+ /* Ignore already sorted paths */
+ if (is_sorted)
+ continue;
+
+ if (presorted_keys == 0)
+ continue;
+
+ /* Since we have presorted keys, consider incremental sort. */
+ path = (Path *) create_incremental_sort_path(root,
+ partially_grouped_rel,
+ path,
+ root->group_pathkeys,
+ presorted_keys,
+ -1.0);
+
+ if (parse->hasAggs)
+ add_path(partially_grouped_rel, (Path *)
+ create_agg_path(root,
+ partially_grouped_rel,
+ path,
+ partially_grouped_rel->reltarget,
+ parse->groupClause ? AGG_SORTED : AGG_PLAIN,
+ AGGSPLIT_INITIAL_SERIAL,
+ parse->groupClause,
+ NIL,
+ agg_partial_costs,
+ dNumPartialGroups));
+ else
+ add_path(partially_grouped_rel, (Path *)
+ create_group_path(root,
+ partially_grouped_rel,
+ path,
+ parse->groupClause,
+ NIL,
+ dNumPartialGroups));
+ }
+ }
+ }
+
+ if (can_sort && cheapest_partial_path != NULL)
+ {
+ /* Similar to above logic, but for partial paths. */
+ foreach(lc, input_rel->partial_pathlist)
+ {
+ Path *path = (Path *) lfirst(lc);
+ Path *path_original = path;
+ bool is_sorted;
+ int presorted_keys;
+
+ is_sorted = pathkeys_count_contained_in(root->group_pathkeys,
+ path->pathkeys,
+ &presorted_keys);
+
+ if (path == cheapest_partial_path || is_sorted)
+ {
+ /* Sort the cheapest partial path, if it isn't already */
+ if (!is_sorted)
+ path = (Path *) create_sort_path(root,
+ partially_grouped_rel,
+ path,
+ root->group_pathkeys,
+ -1.0);
+
+ if (parse->hasAggs)
+ add_partial_path(partially_grouped_rel, (Path *)
+ create_agg_path(root,
+ partially_grouped_rel,
+ path,
+ partially_grouped_rel->reltarget,
+ parse->groupClause ? AGG_SORTED : AGG_PLAIN,
+ AGGSPLIT_INITIAL_SERIAL,
+ parse->groupClause,
+ NIL,
+ agg_partial_costs,
+ dNumPartialPartialGroups));
+ else
+ add_partial_path(partially_grouped_rel, (Path *)
+ create_group_path(root,
+ partially_grouped_rel,
+ path,
+ parse->groupClause,
+ NIL,
+ dNumPartialPartialGroups));
+ }
+
+ /*
+ * Now we may consider incremental sort on this path, but only
+ * when the path is not already sorted and when incremental sort
+ * is enabled.
+ */
+ if (is_sorted || !enable_incremental_sort)
+ continue;
+
+ /* Restore the input path (we might have added Sort on top). */
+ path = path_original;
+
+			/* no shared prefix, no point in building incremental sort */
+ if (presorted_keys == 0)
+ continue;
+
+ /*
+ * We should have already excluded pathkeys of length 1 because
+ * then presorted_keys > 0 would imply is_sorted was true.
+ */
+ Assert(list_length(root->group_pathkeys) != 1);
+
+ path = (Path *) create_incremental_sort_path(root,
+ partially_grouped_rel,
+ path,
+ root->group_pathkeys,
+ presorted_keys,
+ -1.0);
+
+ if (parse->hasAggs)
+ add_partial_path(partially_grouped_rel, (Path *)
+ create_agg_path(root,
+ partially_grouped_rel,
+ path,
+ partially_grouped_rel->reltarget,
+ parse->groupClause ? AGG_SORTED : AGG_PLAIN,
+ AGGSPLIT_INITIAL_SERIAL,
+ parse->groupClause,
+ NIL,
+ agg_partial_costs,
+ dNumPartialPartialGroups));
+ else
+ add_partial_path(partially_grouped_rel, (Path *)
+ create_group_path(root,
+ partially_grouped_rel,
+ path,
+ parse->groupClause,
+ NIL,
+ dNumPartialPartialGroups));
+ }
+ }
+
+ /*
+ * Add a partially-grouped HashAgg Path where possible
+ */
+ if (can_hash && cheapest_total_path != NULL)
+ {
+ /* Checked above */
+ Assert(parse->hasAggs || parse->groupClause);
+
+ add_path(partially_grouped_rel, (Path *)
+ create_agg_path(root,
+ partially_grouped_rel,
+ cheapest_total_path,
+ partially_grouped_rel->reltarget,
+ AGG_HASHED,
+ AGGSPLIT_INITIAL_SERIAL,
+ parse->groupClause,
+ NIL,
+ agg_partial_costs,
+ dNumPartialGroups));
+ }
+
+ /*
+ * Now add a partially-grouped HashAgg partial Path where possible
+ */
+ if (can_hash && cheapest_partial_path != NULL)
+ {
+ add_partial_path(partially_grouped_rel, (Path *)
+ create_agg_path(root,
+ partially_grouped_rel,
+ cheapest_partial_path,
+ partially_grouped_rel->reltarget,
+ AGG_HASHED,
+ AGGSPLIT_INITIAL_SERIAL,
+ parse->groupClause,
+ NIL,
+ agg_partial_costs,
+ dNumPartialPartialGroups));
+ }
+
+ /*
+ * If there is an FDW that's responsible for all baserels of the query,
+ * let it consider adding partially grouped ForeignPaths.
+ */
+ if (partially_grouped_rel->fdwroutine &&
+ partially_grouped_rel->fdwroutine->GetForeignUpperPaths)
+ {
+ FdwRoutine *fdwroutine = partially_grouped_rel->fdwroutine;
+
+ fdwroutine->GetForeignUpperPaths(root,
+ UPPERREL_PARTIAL_GROUP_AGG,
+ input_rel, partially_grouped_rel,
+ extra);
+ }
+
+ return partially_grouped_rel;
+}
+
+/*
+ * Generate Gather and Gather Merge paths for a grouping relation or partial
+ * grouping relation.
+ *
+ * generate_useful_gather_paths does most of the work, but we also consider a
+ * special case: we could try sorting the data by the group_pathkeys and then
+ * applying Gather Merge.
+ *
+ * NB: This function shouldn't be used for anything other than a grouped or
+ * partially grouped relation, not only because it explicitly references
+ * group_pathkeys but also because we pass "true" as the third argument to
+ * generate_useful_gather_paths().
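+ *
+ * The special case can produce plan fragments of the shape
+ *		Gather Merge
+ *		  ->  Sort
+ *				->  Partial HashAggregate
+ * which lets a sorted finalize step run above the Gather Merge even though
+ * the partially aggregated partial paths themselves are unsorted.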
+ */
+static void
+gather_grouping_paths(PlannerInfo *root, RelOptInfo *rel)
+{
+ ListCell *lc;
+ Path *cheapest_partial_path;
+
+ /* Try Gather for unordered paths and Gather Merge for ordered ones. */
+ generate_useful_gather_paths(root, rel, true);
+
+ /* Try cheapest partial path + explicit Sort + Gather Merge. */
+ cheapest_partial_path = linitial(rel->partial_pathlist);
+ if (!pathkeys_contained_in(root->group_pathkeys,
+ cheapest_partial_path->pathkeys))
+ {
+ Path *path;
+ double total_groups;
+
+ total_groups =
+ cheapest_partial_path->rows * cheapest_partial_path->parallel_workers;
+ path = (Path *) create_sort_path(root, rel, cheapest_partial_path,
+ root->group_pathkeys,
+ -1.0);
+ path = (Path *)
+ create_gather_merge_path(root,
+ rel,
+ path,
+ rel->reltarget,
+ root->group_pathkeys,
+ NULL,
+ &total_groups);
+
+ add_path(rel, path);
+ }
+
+ /*
+ * Consider incremental sort on all partial paths, if enabled.
+ *
+ * We can also skip the entire loop when we only have a single-item
+ * group_pathkeys because then we can't possibly have a presorted prefix
+ * of the list without having the list be fully sorted.
+ */
+ if (!enable_incremental_sort || list_length(root->group_pathkeys) == 1)
+ return;
+
+ /* also consider incremental sort on partial paths, if enabled */
+ foreach(lc, rel->partial_pathlist)
+ {
+ Path *path = (Path *) lfirst(lc);
+ bool is_sorted;
+ int presorted_keys;
+ double total_groups;
+
+ is_sorted = pathkeys_count_contained_in(root->group_pathkeys,
+ path->pathkeys,
+ &presorted_keys);
+
+ if (is_sorted)
+ continue;
+
+		if (presorted_keys == 0)
+			continue;
+
+		/* estimate number of rows to be returned, as in the block above */
+		total_groups = path->rows * path->parallel_workers;
+
+ path = (Path *) create_incremental_sort_path(root,
+ rel,
+ path,
+ root->group_pathkeys,
+ presorted_keys,
+ -1.0);
+
+ path = (Path *)
+ create_gather_merge_path(root,
+ rel,
+ path,
+ rel->reltarget,
+ root->group_pathkeys,
+ NULL,
+ &total_groups);
+
+ add_path(rel, path);
+ }
+}
+
+/*
+ * can_partial_agg
+ *
+ * Determines whether or not partial grouping and/or aggregation is possible.
+ * Returns true when possible, false otherwise.
+ */
+static bool
+can_partial_agg(PlannerInfo *root)
+{
+ Query *parse = root->parse;
+
+ if (!parse->hasAggs && parse->groupClause == NIL)
+ {
+ /*
+ * We don't know how to do parallel aggregation unless we have either
+ * some aggregates or a grouping clause.
+ */
+ return false;
+ }
+ else if (parse->groupingSets)
+ {
+ /* We don't know how to do grouping sets in parallel. */
+ return false;
+ }
+ else if (root->hasNonPartialAggs || root->hasNonSerialAggs)
+ {
+ /* Insufficient support for partial mode. */
+ return false;
+ }
+
+ /* Everything looks good. */
+ return true;
+}
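+
+/*
+ * For illustration (hypothetical queries): something like
+ *		SELECT count(x), y FROM tab GROUP BY y
+ * normally permits partial aggregation, whereas
+ *		SELECT count(DISTINCT x) FROM tab
+ * does not, since an aggregate with DISTINCT (or ORDER BY) arguments is
+ * flagged as non-partial and can_partial_agg() then returns false.
+ */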
+
+/*
+ * apply_scanjoin_target_to_paths
+ *
+ * Adjust the final scan/join relation, and recursively all of its children,
+ * to generate the final scan/join target. It would be more correct to model
+ * this as a separate planning step with a new RelOptInfo at the toplevel and
+ * for each child relation, but doing it this way is noticeably cheaper.
+ * Maybe that problem can be solved at some point, but for now we do this.
+ *
+ * If tlist_same_exprs is true, then the scan/join target to be applied has
+ * the same expressions as the existing reltarget, so we need only insert the
+ * appropriate sortgroupref information. By avoiding the creation of
+ * projection paths we save effort both immediately and at plan creation time.
+ */
+static void
+apply_scanjoin_target_to_paths(PlannerInfo *root,
+ RelOptInfo *rel,
+ List *scanjoin_targets,
+ List *scanjoin_targets_contain_srfs,
+ bool scanjoin_target_parallel_safe,
+ bool tlist_same_exprs)
+{
+ bool rel_is_partitioned = IS_PARTITIONED_REL(rel);
+ PathTarget *scanjoin_target;
+ ListCell *lc;
+
+ /* This recurses, so be paranoid. */
+ check_stack_depth();
+
+ /*
+ * If the rel is partitioned, we want to drop its existing paths and
+ * generate new ones. This function would still be correct if we kept the
+ * existing paths: we'd modify them to generate the correct target above
+ * the partitioning Append, and then they'd compete on cost with paths
+ * generating the target below the Append. However, in our current cost
+ * model the latter way is always the same or cheaper cost, so modifying
+ * the existing paths would just be useless work. Moreover, when the cost
+ * is the same, varying roundoff errors might sometimes allow an existing
+ * path to be picked, resulting in undesirable cross-platform plan
+ * variations. So we drop old paths and thereby force the work to be done
+ * below the Append, except in the case of a non-parallel-safe target.
+ *
+ * Some care is needed, because we have to allow
+ * generate_useful_gather_paths to see the old partial paths in the next
+ * stanza. Hence, zap the main pathlist here, then allow
+ * generate_useful_gather_paths to add path(s) to the main list, and
+ * finally zap the partial pathlist.
+ */
+ if (rel_is_partitioned)
+ rel->pathlist = NIL;
+
+ /*
+ * If the scan/join target is not parallel-safe, partial paths cannot
+ * generate it.
+ */
+ if (!scanjoin_target_parallel_safe)
+ {
+ /*
+ * Since we can't generate the final scan/join target in parallel
+ * workers, this is our last opportunity to use any partial paths that
+ * exist; so build Gather path(s) that use them and emit whatever the
+ * current reltarget is. We don't do this in the case where the
+ * target is parallel-safe, since we will be able to generate superior
+ * paths by doing it after the final scan/join target has been
+ * applied.
+ */
+ generate_useful_gather_paths(root, rel, false);
+
+ /* Can't use parallel query above this level. */
+ rel->partial_pathlist = NIL;
+ rel->consider_parallel = false;
+ }
+
+ /* Finish dropping old paths for a partitioned rel, per comment above */
+ if (rel_is_partitioned)
+ rel->partial_pathlist = NIL;
+
+ /* Extract SRF-free scan/join target. */
+ scanjoin_target = linitial_node(PathTarget, scanjoin_targets);
+
+ /*
+ * Apply the SRF-free scan/join target to each existing path.
+ *
+ * If the tlist exprs are the same, we can just inject the sortgroupref
+ * information into the existing pathtargets. Otherwise, replace each
+ * path with a projection path that generates the SRF-free scan/join
+ * target. This can't change the ordering of paths within rel->pathlist,
+ * so we just modify the list in place.
+ */
+ foreach(lc, rel->pathlist)
+ {
+ Path *subpath = (Path *) lfirst(lc);
+
+ /* Shouldn't have any parameterized paths anymore */
+ Assert(subpath->param_info == NULL);
+
+ if (tlist_same_exprs)
+ subpath->pathtarget->sortgrouprefs =
+ scanjoin_target->sortgrouprefs;
+ else
+ {
+ Path *newpath;
+
+ newpath = (Path *) create_projection_path(root, rel, subpath,
+ scanjoin_target);
+ lfirst(lc) = newpath;
+ }
+ }
+
+ /* Likewise adjust the targets for any partial paths. */
+ foreach(lc, rel->partial_pathlist)
+ {
+ Path *subpath = (Path *) lfirst(lc);
+
+ /* Shouldn't have any parameterized paths anymore */
+ Assert(subpath->param_info == NULL);
+
+ if (tlist_same_exprs)
+ subpath->pathtarget->sortgrouprefs =
+ scanjoin_target->sortgrouprefs;
+ else
+ {
+ Path *newpath;
+
+ newpath = (Path *) create_projection_path(root, rel, subpath,
+ scanjoin_target);
+ lfirst(lc) = newpath;
+ }
+ }
+
+ /*
+ * Now, if final scan/join target contains SRFs, insert ProjectSetPath(s)
+ * atop each existing path. (Note that this function doesn't look at the
+ * cheapest-path fields, which is a good thing because they're bogus right
+ * now.)
+ */
+ if (root->parse->hasTargetSRFs)
+ adjust_paths_for_srfs(root, rel,
+ scanjoin_targets,
+ scanjoin_targets_contain_srfs);
+
+ /*
+ * Update the rel's target to be the final (with SRFs) scan/join target.
+ * This now matches the actual output of all the paths, and we might get
+ * confused in createplan.c if they don't agree. We must do this now so
+ * that any append paths made in the next part will use the correct
+ * pathtarget (cf. create_append_path).
+ *
+ * Note that this is also necessary if GetForeignUpperPaths() gets called
+ * on the final scan/join relation or on any of its children, since the
+ * FDW might look at the rel's target to create ForeignPaths.
+ */
+ rel->reltarget = llast_node(PathTarget, scanjoin_targets);
+
+ /*
+ * If the relation is partitioned, recursively apply the scan/join target
+ * to all partitions, and generate brand-new Append paths in which the
+ * scan/join target is computed below the Append rather than above it.
+ * Since Append is not projection-capable, that might save a separate
+ * Result node, and it also is important for partitionwise aggregate.
+	 * Result node, and it is also important for partitionwise aggregation.
+ if (rel_is_partitioned)
+ {
+ List *live_children = NIL;
+ int i;
+
+ /* Adjust each partition. */
+ i = -1;
+ while ((i = bms_next_member(rel->live_parts, i)) >= 0)
+ {
+ RelOptInfo *child_rel = rel->part_rels[i];
+ AppendRelInfo **appinfos;
+ int nappinfos;
+ List *child_scanjoin_targets = NIL;
+ ListCell *lc;
+
+ Assert(child_rel != NULL);
+
+ /* Dummy children can be ignored. */
+ if (IS_DUMMY_REL(child_rel))
+ continue;
+
+ /* Translate scan/join targets for this child. */
+ appinfos = find_appinfos_by_relids(root, child_rel->relids,
+ &nappinfos);
+ foreach(lc, scanjoin_targets)
+ {
+ PathTarget *target = lfirst_node(PathTarget, lc);
+
+ target = copy_pathtarget(target);
+ target->exprs = (List *)
+ adjust_appendrel_attrs(root,
+ (Node *) target->exprs,
+ nappinfos, appinfos);
+ child_scanjoin_targets = lappend(child_scanjoin_targets,
+ target);
+ }
+ pfree(appinfos);
+
+ /* Recursion does the real work. */
+ apply_scanjoin_target_to_paths(root, child_rel,
+ child_scanjoin_targets,
+ scanjoin_targets_contain_srfs,
+ scanjoin_target_parallel_safe,
+ tlist_same_exprs);
+
+ /* Save non-dummy children for Append paths. */
+ if (!IS_DUMMY_REL(child_rel))
+ live_children = lappend(live_children, child_rel);
+ }
+
+ /* Build new paths for this relation by appending child paths. */
+ add_paths_to_append_rel(root, rel, live_children);
+ }
+
+ /*
+ * Consider generating Gather or Gather Merge paths. We must only do this
+ * if the relation is parallel safe, and we don't do it for child rels to
+ * avoid creating multiple Gather nodes within the same plan. We must do
+ * this after all paths have been generated and before set_cheapest, since
+ * one of the generated paths may turn out to be the cheapest one.
+ */
+ if (rel->consider_parallel && !IS_OTHER_REL(rel))
+ generate_useful_gather_paths(root, rel, false);
+
+ /*
+ * Reassess which paths are the cheapest, now that we've potentially added
+ * new Gather (or Gather Merge) and/or Append (or MergeAppend) paths to
+ * this relation.
+ */
+ set_cheapest(rel);
+}
+
+/*
+ * create_partitionwise_grouping_paths
+ *
+ * If the partition keys of the input relation are part of the GROUP BY
+ * clause, all the rows belonging to a given group come from a single
+ * partition.  This
+ * allows aggregation/grouping over a partitioned relation to be broken down
+ * into aggregation/grouping on each partition. This should be no worse, and
+ * often better, than the normal approach.
+ *
+ * However, if the GROUP BY clause does not contain all the partition keys,
+ * rows from a given group may be spread across multiple partitions. In that
+ * case, we perform partial aggregation for each group, append the results,
+ * and then finalize aggregation. This is less certain to win than the
+ * previous case. It may win if the PartialAggregate stage greatly reduces
+ * the number of groups, because fewer rows will pass through the Append node.
+ * It may lose if we have lots of small groups.
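+ *
+ * For illustration only (schematic plan shapes): with a table partitioned on
+ * column a,
+ *		GROUP BY a	->	Append of per-partition Aggregate nodes
+ *						(PARTITIONWISE_AGGREGATE_FULL)
+ *		GROUP BY b	->	Finalize Aggregate above an Append of per-partition
+ *						Partial Aggregate nodes
+ *						(PARTITIONWISE_AGGREGATE_PARTIAL)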
+ */
+static void
+create_partitionwise_grouping_paths(PlannerInfo *root,
+ RelOptInfo *input_rel,
+ RelOptInfo *grouped_rel,
+ RelOptInfo *partially_grouped_rel,
+ const AggClauseCosts *agg_costs,
+ grouping_sets_data *gd,
+ PartitionwiseAggregateType patype,
+ GroupPathExtraData *extra)
+{
+ List *grouped_live_children = NIL;
+ List *partially_grouped_live_children = NIL;
+ PathTarget *target = grouped_rel->reltarget;
+ bool partial_grouping_valid = true;
+ int i;
+
+ Assert(patype != PARTITIONWISE_AGGREGATE_NONE);
+ Assert(patype != PARTITIONWISE_AGGREGATE_PARTIAL ||
+ partially_grouped_rel != NULL);
+
+ /* Add paths for partitionwise aggregation/grouping. */
+ i = -1;
+ while ((i = bms_next_member(input_rel->live_parts, i)) >= 0)
+ {
+ RelOptInfo *child_input_rel = input_rel->part_rels[i];
+ PathTarget *child_target;
+ AppendRelInfo **appinfos;
+ int nappinfos;
+ GroupPathExtraData child_extra;
+ RelOptInfo *child_grouped_rel;
+ RelOptInfo *child_partially_grouped_rel;
+
+ Assert(child_input_rel != NULL);
+
+ /* Dummy children can be ignored. */
+ if (IS_DUMMY_REL(child_input_rel))
+ continue;
+
+ child_target = copy_pathtarget(target);
+
+ /*
+ * Copy the given "extra" structure as is and then override the
+ * members specific to this child.
+ */
+ memcpy(&child_extra, extra, sizeof(child_extra));
+
+ appinfos = find_appinfos_by_relids(root, child_input_rel->relids,
+ &nappinfos);
+
+ child_target->exprs = (List *)
+ adjust_appendrel_attrs(root,
+ (Node *) target->exprs,
+ nappinfos, appinfos);
+
+ /* Translate havingQual and targetList. */
+ child_extra.havingQual = (Node *)
+ adjust_appendrel_attrs(root,
+ extra->havingQual,
+ nappinfos, appinfos);
+ child_extra.targetList = (List *)
+ adjust_appendrel_attrs(root,
+ (Node *) extra->targetList,
+ nappinfos, appinfos);
+
+ /*
+ * extra->patype was the value computed for our parent rel; patype is
+ * the value for this relation. For the child, our value is its
+ * parent rel's value.
+ */
+ child_extra.patype = patype;
+
+ /*
+ * Create grouping relation to hold fully aggregated grouping and/or
+ * aggregation paths for the child.
+ */
+ child_grouped_rel = make_grouping_rel(root, child_input_rel,
+ child_target,
+ extra->target_parallel_safe,
+ child_extra.havingQual);
+
+ /* Create grouping paths for this child relation. */
+ create_ordinary_grouping_paths(root, child_input_rel,
+ child_grouped_rel,
+ agg_costs, gd, &child_extra,
+ &child_partially_grouped_rel);
+
+ if (child_partially_grouped_rel)
+ {
+ partially_grouped_live_children =
+ lappend(partially_grouped_live_children,
+ child_partially_grouped_rel);
+ }
+ else
+ partial_grouping_valid = false;
+
+ if (patype == PARTITIONWISE_AGGREGATE_FULL)
+ {
+ set_cheapest(child_grouped_rel);
+ grouped_live_children = lappend(grouped_live_children,
+ child_grouped_rel);
+ }
+
+ pfree(appinfos);
+ }
+
+ /*
+ * Try to create append paths for partially grouped children. For full
+ * partitionwise aggregation, we might have paths in the partial_pathlist
+ * if parallel aggregation is possible. For partial partitionwise
+ * aggregation, we may have paths in both pathlist and partial_pathlist.
+ *
+ * NB: We must have a partially grouped path for every child in order to
+ * generate a partially grouped path for this relation.
+ */
+ if (partially_grouped_rel && partial_grouping_valid)
+ {
+ Assert(partially_grouped_live_children != NIL);
+
+ add_paths_to_append_rel(root, partially_grouped_rel,
+ partially_grouped_live_children);
+
+ /*
+		 * We need to call set_cheapest, since the finalization step will use
+		 * the cheapest path from the rel.
+ */
+ if (partially_grouped_rel->pathlist)
+ set_cheapest(partially_grouped_rel);
+ }
+
+ /* If possible, create append paths for fully grouped children. */
+ if (patype == PARTITIONWISE_AGGREGATE_FULL)
+ {
+ Assert(grouped_live_children != NIL);
+
+ add_paths_to_append_rel(root, grouped_rel, grouped_live_children);
+ }
+}
+
+/*
+ * group_by_has_partkey
+ *
+ * Returns true if all the partition keys of the given relation are part of
+ * the GROUP BY clause, false otherwise.
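+ *
+ * For illustration (hypothetical schema): if the relation is partitioned on
+ * (a, b), then GROUP BY a, b, c passes this test, since every partition key
+ * appears among the grouping expressions, while GROUP BY a, c does not.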
+ */
+static bool
+group_by_has_partkey(RelOptInfo *input_rel,
+ List *targetList,
+ List *groupClause)
+{
+ List *groupexprs = get_sortgrouplist_exprs(groupClause, targetList);
+ int cnt = 0;
+ int partnatts;
+
+ /* Input relation should be partitioned. */
+ Assert(input_rel->part_scheme);
+
+	/* Rule out early if there are no partition keys present. */
+ if (!input_rel->partexprs)
+ return false;
+
+ partnatts = input_rel->part_scheme->partnatts;
+
+ for (cnt = 0; cnt < partnatts; cnt++)
+ {
+ List *partexprs = input_rel->partexprs[cnt];
+ ListCell *lc;
+ bool found = false;
+
+ foreach(lc, partexprs)
+ {
+ Expr *partexpr = lfirst(lc);
+
+ if (list_member(groupexprs, partexpr))
+ {
+ found = true;
+ break;
+ }
+ }
+
+ /*
+		 * If none of the partition key expressions matches any of the
+		 * GROUP BY expressions, return false.
+ */
+ if (!found)
+ return false;
+ }
+
+ return true;
+}
diff --git a/src/backend/optimizer/plan/setrefs.c b/src/backend/optimizer/plan/setrefs.c
new file mode 100644
index 0000000..9d912a8
--- /dev/null
+++ b/src/backend/optimizer/plan/setrefs.c
@@ -0,0 +1,3398 @@
+/*-------------------------------------------------------------------------
+ *
+ * setrefs.c
+ * Post-processing of a completed plan tree: fix references to subplan
+ * vars, compute regproc values for operators, etc
+ *
+ * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ * src/backend/optimizer/plan/setrefs.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "access/transam.h"
+#include "catalog/pg_type.h"
+#include "nodes/makefuncs.h"
+#include "nodes/nodeFuncs.h"
+#include "optimizer/optimizer.h"
+#include "optimizer/pathnode.h"
+#include "optimizer/planmain.h"
+#include "optimizer/planner.h"
+#include "optimizer/tlist.h"
+#include "tcop/utility.h"
+#include "utils/lsyscache.h"
+#include "utils/syscache.h"
+
+
+typedef struct
+{
+ int varno; /* RT index of Var */
+ AttrNumber varattno; /* attr number of Var */
+ AttrNumber resno; /* TLE position of Var */
+} tlist_vinfo;
+
+typedef struct
+{
+ List *tlist; /* underlying target list */
+ int num_vars; /* number of plain Var tlist entries */
+ bool has_ph_vars; /* are there PlaceHolderVar entries? */
+ bool has_non_vars; /* are there other entries? */
+ tlist_vinfo vars[FLEXIBLE_ARRAY_MEMBER]; /* has num_vars entries */
+} indexed_tlist;
+
+typedef struct
+{
+ PlannerInfo *root;
+ int rtoffset;
+ double num_exec;
+} fix_scan_expr_context;
+
+typedef struct
+{
+ PlannerInfo *root;
+ indexed_tlist *outer_itlist;
+ indexed_tlist *inner_itlist;
+ Index acceptable_rel;
+ int rtoffset;
+ double num_exec;
+} fix_join_expr_context;
+
+typedef struct
+{
+ PlannerInfo *root;
+ indexed_tlist *subplan_itlist;
+ int newvarno;
+ int rtoffset;
+ double num_exec;
+} fix_upper_expr_context;
+
+typedef struct
+{
+ PlannerInfo *root;
+ indexed_tlist *subplan_itlist;
+ int newvarno;
+} fix_windowagg_cond_context;
+
+/*
+ * Selecting the best alternative in an AlternativeSubPlan expression requires
+ * estimating how many times that expression will be evaluated. For an
+ * expression in a plan node's targetlist, the plan's estimated number of
+ * output rows is clearly what to use, but for an expression in a qual it's
+ * far less clear. Since AlternativeSubPlans aren't heavily used, we don't
+ * want to expend a lot of cycles making such estimates. What we use is twice
+ * the number of output rows. That's not entirely unfounded: we know that
+ * clause_selectivity() would fall back to a default selectivity estimate
+ * of 0.5 for any SubPlan, so if the qual containing the SubPlan is the last
+ * to be applied (which it likely would be, thanks to order_qual_clauses()),
+ * this matches what we could have estimated in a far more laborious fashion.
+ * Obviously there are many other scenarios, but it's probably not worth the
+ * trouble to try to improve on this estimate, especially not when we don't
+ * have a better estimate for the selectivity of the SubPlan qual itself.
+ */
+#define NUM_EXEC_TLIST(parentplan) ((parentplan)->plan_rows)
+#define NUM_EXEC_QUAL(parentplan) ((parentplan)->plan_rows * 2.0)
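+
+/*
+ * Worked example (illustrative): for a plan node estimated to emit 1000
+ * rows, an AlternativeSubPlan in its targetlist is assumed to be evaluated
+ * 1000 times, while one in its qual is assumed to be evaluated 2000 times.
+ */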
+
+/*
+ * Check if a Const node is a regclass value. We accept plain OID too,
+ * since a regclass Const will get folded to that type if it's an argument
+ * to oideq or similar operators. (This might result in some extraneous
+ * values in a plan's list of relation dependencies, but the worst result
+ * would be occasional useless replans.)
+ */
+#define ISREGCLASSCONST(con) \
+ (((con)->consttype == REGCLASSOID || (con)->consttype == OIDOID) && \
+ !(con)->constisnull)
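+
+/*
+ * For illustration (hypothetical query): a qual such as
+ *		WHERE tableoid = 'some_table'::regclass
+ * yields a regclass Const, so the referenced relation's OID gets added to
+ * the plan's relation dependencies and the cached plan is rebuilt if that
+ * relation is invalidated.
+ */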
+
+#define fix_scan_list(root, lst, rtoffset, num_exec) \
+ ((List *) fix_scan_expr(root, (Node *) (lst), rtoffset, num_exec))
+
+static void add_rtes_to_flat_rtable(PlannerInfo *root, bool recursing);
+static void flatten_unplanned_rtes(PlannerGlobal *glob, RangeTblEntry *rte);
+static bool flatten_rtes_walker(Node *node, PlannerGlobal *glob);
+static void add_rte_to_flat_rtable(PlannerGlobal *glob, RangeTblEntry *rte);
+static Plan *set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset);
+static Plan *set_indexonlyscan_references(PlannerInfo *root,
+ IndexOnlyScan *plan,
+ int rtoffset);
+static Plan *set_subqueryscan_references(PlannerInfo *root,
+ SubqueryScan *plan,
+ int rtoffset);
+static Plan *clean_up_removed_plan_level(Plan *parent, Plan *child);
+static void set_foreignscan_references(PlannerInfo *root,
+ ForeignScan *fscan,
+ int rtoffset);
+static void set_customscan_references(PlannerInfo *root,
+ CustomScan *cscan,
+ int rtoffset);
+static Plan *set_append_references(PlannerInfo *root,
+ Append *aplan,
+ int rtoffset);
+static Plan *set_mergeappend_references(PlannerInfo *root,
+ MergeAppend *mplan,
+ int rtoffset);
+static void set_hash_references(PlannerInfo *root, Plan *plan, int rtoffset);
+static Relids offset_relid_set(Relids relids, int rtoffset);
+static Node *fix_scan_expr(PlannerInfo *root, Node *node,
+ int rtoffset, double num_exec);
+static Node *fix_scan_expr_mutator(Node *node, fix_scan_expr_context *context);
+static bool fix_scan_expr_walker(Node *node, fix_scan_expr_context *context);
+static void set_join_references(PlannerInfo *root, Join *join, int rtoffset);
+static void set_upper_references(PlannerInfo *root, Plan *plan, int rtoffset);
+static void set_param_references(PlannerInfo *root, Plan *plan);
+static Node *convert_combining_aggrefs(Node *node, void *context);
+static void set_dummy_tlist_references(Plan *plan, int rtoffset);
+static indexed_tlist *build_tlist_index(List *tlist);
+static Var *search_indexed_tlist_for_var(Var *var,
+ indexed_tlist *itlist,
+ int newvarno,
+ int rtoffset);
+static Var *search_indexed_tlist_for_non_var(Expr *node,
+ indexed_tlist *itlist,
+ int newvarno);
+static Var *search_indexed_tlist_for_sortgroupref(Expr *node,
+ Index sortgroupref,
+ indexed_tlist *itlist,
+ int newvarno);
+static List *fix_join_expr(PlannerInfo *root,
+ List *clauses,
+ indexed_tlist *outer_itlist,
+ indexed_tlist *inner_itlist,
+ Index acceptable_rel,
+ int rtoffset, double num_exec);
+static Node *fix_join_expr_mutator(Node *node,
+ fix_join_expr_context *context);
+static Node *fix_upper_expr(PlannerInfo *root,
+ Node *node,
+ indexed_tlist *subplan_itlist,
+ int newvarno,
+ int rtoffset, double num_exec);
+static Node *fix_upper_expr_mutator(Node *node,
+ fix_upper_expr_context *context);
+static List *set_returning_clause_references(PlannerInfo *root,
+ List *rlist,
+ Plan *topplan,
+ Index resultRelation,
+ int rtoffset);
+static List *set_windowagg_runcondition_references(PlannerInfo *root,
+ List *runcondition,
+ Plan *plan);
+
+
+/*****************************************************************************
+ *
+ * SUBPLAN REFERENCES
+ *
+ *****************************************************************************/
+
+/*
+ * set_plan_references
+ *
+ * This is the final processing pass of the planner/optimizer. The plan
+ * tree is complete; we just have to adjust some representational details
+ * for the convenience of the executor:
+ *
+ * 1. We flatten the various subquery rangetables into a single list, and
+ * zero out RangeTblEntry fields that are not useful to the executor.
+ *
+ * 2. We adjust Vars in scan nodes to be consistent with the flat rangetable.
+ *
+ * 3. We adjust Vars in upper plan nodes to refer to the outputs of their
+ * subplans.
+ *
+ * 4. Aggrefs in Agg plan nodes need to be adjusted in some cases involving
+ * partial aggregation or minmax aggregate optimization.
+ *
+ * 5. PARAM_MULTIEXPR Params are replaced by regular PARAM_EXEC Params,
+ * now that we have finished planning all MULTIEXPR subplans.
+ *
+ * 6. AlternativeSubPlan expressions are replaced by just one of their
+ * alternatives, using an estimate of how many times they'll be executed.
+ *
+ * 7. We compute regproc OIDs for operators (ie, we look up the function
+ * that implements each op).
+ *
+ * 8. We create lists of specific objects that the plan depends on.
+ * This will be used by plancache.c to drive invalidation of cached plans.
+ * Relation dependencies are represented by OIDs, and everything else by
+ * PlanInvalItems (this distinction is motivated by the shared-inval APIs).
+ * Currently, relations, user-defined functions, and domains are the only
+ * types of objects that are explicitly tracked this way.
+ *
+ * 9. We assign every plan node in the tree a unique ID.
+ *
+ * We also perform one final optimization step, which is to delete
+ * SubqueryScan, Append, and MergeAppend plan nodes that aren't doing
+ * anything useful. The reason for doing this last is that
+ * it can't readily be done before set_plan_references, because it would
+ * break set_upper_references: the Vars in the child plan's top tlist
+ * wouldn't match up with the Vars in the outer plan tree. A SubqueryScan
+ * serves a necessary function as a buffer between outer query and subquery
+ * variable numbering ... but after we've flattened the rangetable this is
+ * no longer a problem, since then there's only one rtindex namespace.
+ * Likewise, Append and MergeAppend buffer between the parent and child vars
+ * of an appendrel, but we don't need to worry about that once we've done
+ * set_plan_references.
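+ *
+ * For illustration (schematic): after this pass, a Var in an upper plan
+ * node's tlist that referenced column 3 of rangetable entry 1 no longer
+ * carries (varno 1, varattno 3); it instead references OUTER_VAR (or
+ * INNER_VAR), with varattno set to the resno of the matching entry in the
+ * child plan's targetlist.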
+ *
+ * set_plan_references recursively traverses the whole plan tree.
+ *
+ * The return value is normally the same Plan node passed in, but can be
+ * different when the passed-in Plan is a node we decide isn't needed.
+ *
+ * The flattened rangetable entries are appended to root->glob->finalrtable.
+ * Also, rowmarks entries are appended to root->glob->finalrowmarks, and the
+ * RT indexes of ModifyTable result relations to root->glob->resultRelations,
+ * and flattened AppendRelInfos are appended to root->glob->appendRelations.
+ * Plan dependencies are appended to root->glob->relationOids (for relations)
+ * and root->glob->invalItems (for everything else).
+ *
+ * Notice that we modify Plan nodes in-place, but use expression_tree_mutator
+ * to process targetlist and qual expressions. We can assume that the Plan
+ * nodes were just built by the planner and are not multiply referenced, but
+ * it's not so safe to assume that for expression tree nodes.
+ */
+Plan *
+set_plan_references(PlannerInfo *root, Plan *plan)
+{
+ Plan *result;
+ PlannerGlobal *glob = root->glob;
+ int rtoffset = list_length(glob->finalrtable);
+ ListCell *lc;
+
+ /*
+ * Add all the query's RTEs to the flattened rangetable. The live ones
+ * will have their rangetable indexes increased by rtoffset. (Additional
+ * RTEs, not referenced by the Plan tree, might get added after those.)
+ */
+ add_rtes_to_flat_rtable(root, false);
+
+ /*
+ * Adjust RT indexes of PlanRowMarks and add to final rowmarks list
+ */
+ foreach(lc, root->rowMarks)
+ {
+ PlanRowMark *rc = lfirst_node(PlanRowMark, lc);
+ PlanRowMark *newrc;
+
+ /* flat copy is enough since all fields are scalars */
+ newrc = (PlanRowMark *) palloc(sizeof(PlanRowMark));
+ memcpy(newrc, rc, sizeof(PlanRowMark));
+
+ /* adjust indexes ... but *not* the rowmarkId */
+ newrc->rti += rtoffset;
+ newrc->prti += rtoffset;
+
+ glob->finalrowmarks = lappend(glob->finalrowmarks, newrc);
+ }
+
+ /*
+ * Adjust RT indexes of AppendRelInfos and add to final appendrels list.
+ * We assume the AppendRelInfos were built during planning and don't need
+ * to be copied.
+ */
+ foreach(lc, root->append_rel_list)
+ {
+ AppendRelInfo *appinfo = lfirst_node(AppendRelInfo, lc);
+
+ /* adjust RT indexes */
+ appinfo->parent_relid += rtoffset;
+ appinfo->child_relid += rtoffset;
+
+ /*
+ * Rather than adjust the translated_vars entries, just drop 'em.
+ * Neither the executor nor EXPLAIN currently need that data.
+ */
+ appinfo->translated_vars = NIL;
+
+ glob->appendRelations = lappend(glob->appendRelations, appinfo);
+ }
+
+ /* If needed, create workspace for processing AlternativeSubPlans */
+ if (root->hasAlternativeSubPlans)
+ {
+ root->isAltSubplan = (bool *)
+ palloc0(list_length(glob->subplans) * sizeof(bool));
+ root->isUsedSubplan = (bool *)
+ palloc0(list_length(glob->subplans) * sizeof(bool));
+ }
+
+ /* Now fix the Plan tree */
+ result = set_plan_refs(root, plan, rtoffset);
+
+ /*
+ * If we have AlternativeSubPlans, it is likely that we now have some
+ * unreferenced subplans in glob->subplans. To avoid expending cycles on
+ * those subplans later, get rid of them by setting those list entries to
+ * NULL. (Note: we can't do this immediately upon processing an
+ * AlternativeSubPlan, because there may be multiple copies of the
+ * AlternativeSubPlan, and they can get resolved differently.)
+ */
+ if (root->hasAlternativeSubPlans)
+ {
+ foreach(lc, glob->subplans)
+ {
+ int ndx = foreach_current_index(lc);
+
+ /*
+ * If it was used by some AlternativeSubPlan in this query level,
+ * but wasn't selected as best by any AlternativeSubPlan, then we
+ * don't need it. Do not touch subplans that aren't parts of
+ * AlternativeSubPlans.
+ */
+ if (root->isAltSubplan[ndx] && !root->isUsedSubplan[ndx])
+ lfirst(lc) = NULL;
+ }
+ }
+
+ return result;
+}
+
+/*
+ * Extract RangeTblEntries from the plan's rangetable, and add to flat rtable
+ *
+ * This can recurse into subquery plans; "recursing" is true if so.
+ */
+static void
+add_rtes_to_flat_rtable(PlannerInfo *root, bool recursing)
+{
+ PlannerGlobal *glob = root->glob;
+ Index rti;
+ ListCell *lc;
+
+ /*
+ * Add the query's own RTEs to the flattened rangetable.
+ *
+ * At top level, we must add all RTEs so that their indexes in the
+ * flattened rangetable match up with their original indexes. When
+ * recursing, we only care about extracting relation RTEs.
+ */
+ foreach(lc, root->parse->rtable)
+ {
+ RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc);
+
+ if (!recursing || rte->rtekind == RTE_RELATION)
+ add_rte_to_flat_rtable(glob, rte);
+ }
+
+ /*
+ * If there are any dead subqueries, they are not referenced in the Plan
+ * tree, so we must add RTEs contained in them to the flattened rtable
+ * separately. (If we failed to do this, the executor would not perform
+ * expected permission checks for tables mentioned in such subqueries.)
+ *
+ * Note: this pass over the rangetable can't be combined with the previous
+ * one, because that would mess up the numbering of the live RTEs in the
+ * flattened rangetable.
+ */
+ rti = 1;
+ foreach(lc, root->parse->rtable)
+ {
+ RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc);
+
+ /*
+ * We should ignore inheritance-parent RTEs: their contents have been
+ * pulled up into our rangetable already. Also ignore any subquery
+ * RTEs without matching RelOptInfos, as they likewise have been
+ * pulled up.
+ */
+ if (rte->rtekind == RTE_SUBQUERY && !rte->inh &&
+ rti < root->simple_rel_array_size)
+ {
+ RelOptInfo *rel = root->simple_rel_array[rti];
+
+ if (rel != NULL)
+ {
+ Assert(rel->relid == rti); /* sanity check on array */
+
+ /*
+ * The subquery might never have been planned at all, if it
+ * was excluded on the basis of self-contradictory constraints
+ * in our query level. In this case apply
+ * flatten_unplanned_rtes.
+ *
+ * If it was planned but the result rel is dummy, we assume
+ * that it has been omitted from our plan tree (see
+ * set_subquery_pathlist), and recurse to pull up its RTEs.
+ *
+ * Otherwise, it should be represented by a SubqueryScan node
+ * somewhere in our plan tree, and we'll pull up its RTEs when
+ * we process that plan node.
+ *
+ * However, if we're recursing, then we should pull up RTEs
+ * whether the subquery is dummy or not, because we've found
+ * that some upper query level is treating this one as dummy,
+ * and so we won't scan this level's plan tree at all.
+ */
+ if (rel->subroot == NULL)
+ flatten_unplanned_rtes(glob, rte);
+ else if (recursing ||
+ IS_DUMMY_REL(fetch_upper_rel(rel->subroot,
+ UPPERREL_FINAL, NULL)))
+ add_rtes_to_flat_rtable(rel->subroot, true);
+ }
+ }
+ rti++;
+ }
+}
+
+/*
+ * Extract RangeTblEntries from a subquery that was never planned at all
+ */
+static void
+flatten_unplanned_rtes(PlannerGlobal *glob, RangeTblEntry *rte)
+{
+ /* Use query_tree_walker to find all RTEs in the parse tree */
+ (void) query_tree_walker(rte->subquery,
+ flatten_rtes_walker,
+ (void *) glob,
+ QTW_EXAMINE_RTES_BEFORE);
+}
+
+static bool
+flatten_rtes_walker(Node *node, PlannerGlobal *glob)
+{
+ if (node == NULL)
+ return false;
+ if (IsA(node, RangeTblEntry))
+ {
+ RangeTblEntry *rte = (RangeTblEntry *) node;
+
+ /* As above, we need only save relation RTEs */
+ if (rte->rtekind == RTE_RELATION)
+ add_rte_to_flat_rtable(glob, rte);
+ return false;
+ }
+ if (IsA(node, Query))
+ {
+ /* Recurse into subselects */
+ return query_tree_walker((Query *) node,
+ flatten_rtes_walker,
+ (void *) glob,
+ QTW_EXAMINE_RTES_BEFORE);
+ }
+ return expression_tree_walker(node, flatten_rtes_walker,
+ (void *) glob);
+}
+
+/*
+ * Add (a copy of) the given RTE to the final rangetable
+ *
+ * In the flat rangetable, we zero out substructure pointers that are not
+ * needed by the executor; this reduces the storage space and copying cost
+ * for cached plans. We keep only the ctename, alias and eref Alias fields,
+ * which are needed by EXPLAIN, and the selectedCols, insertedCols,
+ * updatedCols, and extraUpdatedCols bitmaps, which are needed for
+ * executor-startup permissions checking and for trigger event checking.
+ */
+static void
+add_rte_to_flat_rtable(PlannerGlobal *glob, RangeTblEntry *rte)
+{
+ RangeTblEntry *newrte;
+
+ /* flat copy to duplicate all the scalar fields */
+ newrte = (RangeTblEntry *) palloc(sizeof(RangeTblEntry));
+ memcpy(newrte, rte, sizeof(RangeTblEntry));
+
+ /* zap unneeded sub-structure */
+ newrte->tablesample = NULL;
+ newrte->subquery = NULL;
+ newrte->joinaliasvars = NIL;
+ newrte->joinleftcols = NIL;
+ newrte->joinrightcols = NIL;
+ newrte->join_using_alias = NULL;
+ newrte->functions = NIL;
+ newrte->tablefunc = NULL;
+ newrte->values_lists = NIL;
+ newrte->coltypes = NIL;
+ newrte->coltypmods = NIL;
+ newrte->colcollations = NIL;
+ newrte->securityQuals = NIL;
+
+ glob->finalrtable = lappend(glob->finalrtable, newrte);
+
+ /*
+ * If it's a plain relation RTE, add the table to relationOids.
+ *
+ * We do this even though the RTE might be unreferenced in the plan tree;
+ * this would correspond to cases such as views that were expanded, child
+ * tables that were eliminated by constraint exclusion, etc. Schema
+ * invalidation on such a rel must still force rebuilding of the plan.
+ *
+ * Note we don't bother to avoid making duplicate list entries. We could,
+ * but it would probably cost more cycles than it would save.
+ */
+ if (newrte->rtekind == RTE_RELATION)
+ glob->relationOids = lappend_oid(glob->relationOids, newrte->relid);
+}
+
+/*
+ * set_plan_refs: recurse through the Plan nodes of a single subquery level
+ */
+static Plan *
+set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset)
+{
+ ListCell *l;
+
+ if (plan == NULL)
+ return NULL;
+
+ /* Assign this node a unique ID. */
+ plan->plan_node_id = root->glob->lastPlanNodeId++;
+
+ /*
+ * Plan-type-specific fixes
+ */
+ switch (nodeTag(plan))
+ {
+ case T_SeqScan:
+ {
+ SeqScan *splan = (SeqScan *) plan;
+
+ splan->scan.scanrelid += rtoffset;
+ splan->scan.plan.targetlist =
+ fix_scan_list(root, splan->scan.plan.targetlist,
+ rtoffset, NUM_EXEC_TLIST(plan));
+ splan->scan.plan.qual =
+ fix_scan_list(root, splan->scan.plan.qual,
+ rtoffset, NUM_EXEC_QUAL(plan));
+ }
+ break;
+ case T_SampleScan:
+ {
+ SampleScan *splan = (SampleScan *) plan;
+
+ splan->scan.scanrelid += rtoffset;
+ splan->scan.plan.targetlist =
+ fix_scan_list(root, splan->scan.plan.targetlist,
+ rtoffset, NUM_EXEC_TLIST(plan));
+ splan->scan.plan.qual =
+ fix_scan_list(root, splan->scan.plan.qual,
+ rtoffset, NUM_EXEC_QUAL(plan));
+ splan->tablesample = (TableSampleClause *)
+ fix_scan_expr(root, (Node *) splan->tablesample,
+ rtoffset, 1);
+ }
+ break;
+ case T_IndexScan:
+ {
+ IndexScan *splan = (IndexScan *) plan;
+
+ splan->scan.scanrelid += rtoffset;
+ splan->scan.plan.targetlist =
+ fix_scan_list(root, splan->scan.plan.targetlist,
+ rtoffset, NUM_EXEC_TLIST(plan));
+ splan->scan.plan.qual =
+ fix_scan_list(root, splan->scan.plan.qual,
+ rtoffset, NUM_EXEC_QUAL(plan));
+ splan->indexqual =
+ fix_scan_list(root, splan->indexqual,
+ rtoffset, 1);
+ splan->indexqualorig =
+ fix_scan_list(root, splan->indexqualorig,
+ rtoffset, NUM_EXEC_QUAL(plan));
+ splan->indexorderby =
+ fix_scan_list(root, splan->indexorderby,
+ rtoffset, 1);
+ splan->indexorderbyorig =
+ fix_scan_list(root, splan->indexorderbyorig,
+ rtoffset, NUM_EXEC_QUAL(plan));
+ }
+ break;
+ case T_IndexOnlyScan:
+ {
+ IndexOnlyScan *splan = (IndexOnlyScan *) plan;
+
+ return set_indexonlyscan_references(root, splan, rtoffset);
+ }
+ break;
+ case T_BitmapIndexScan:
+ {
+ BitmapIndexScan *splan = (BitmapIndexScan *) plan;
+
+ splan->scan.scanrelid += rtoffset;
+ /* no need to fix targetlist and qual */
+ Assert(splan->scan.plan.targetlist == NIL);
+ Assert(splan->scan.plan.qual == NIL);
+ splan->indexqual =
+ fix_scan_list(root, splan->indexqual, rtoffset, 1);
+ splan->indexqualorig =
+ fix_scan_list(root, splan->indexqualorig,
+ rtoffset, NUM_EXEC_QUAL(plan));
+ }
+ break;
+ case T_BitmapHeapScan:
+ {
+ BitmapHeapScan *splan = (BitmapHeapScan *) plan;
+
+ splan->scan.scanrelid += rtoffset;
+ splan->scan.plan.targetlist =
+ fix_scan_list(root, splan->scan.plan.targetlist,
+ rtoffset, NUM_EXEC_TLIST(plan));
+ splan->scan.plan.qual =
+ fix_scan_list(root, splan->scan.plan.qual,
+ rtoffset, NUM_EXEC_QUAL(plan));
+ splan->bitmapqualorig =
+ fix_scan_list(root, splan->bitmapqualorig,
+ rtoffset, NUM_EXEC_QUAL(plan));
+ }
+ break;
+ case T_TidScan:
+ {
+ TidScan *splan = (TidScan *) plan;
+
+ splan->scan.scanrelid += rtoffset;
+ splan->scan.plan.targetlist =
+ fix_scan_list(root, splan->scan.plan.targetlist,
+ rtoffset, NUM_EXEC_TLIST(plan));
+ splan->scan.plan.qual =
+ fix_scan_list(root, splan->scan.plan.qual,
+ rtoffset, NUM_EXEC_QUAL(plan));
+ splan->tidquals =
+ fix_scan_list(root, splan->tidquals,
+ rtoffset, 1);
+ }
+ break;
+ case T_TidRangeScan:
+ {
+ TidRangeScan *splan = (TidRangeScan *) plan;
+
+ splan->scan.scanrelid += rtoffset;
+ splan->scan.plan.targetlist =
+ fix_scan_list(root, splan->scan.plan.targetlist,
+ rtoffset, NUM_EXEC_TLIST(plan));
+ splan->scan.plan.qual =
+ fix_scan_list(root, splan->scan.plan.qual,
+ rtoffset, NUM_EXEC_QUAL(plan));
+ splan->tidrangequals =
+ fix_scan_list(root, splan->tidrangequals,
+ rtoffset, 1);
+ }
+ break;
+ case T_SubqueryScan:
+ /* Needs special treatment, see comments below */
+ return set_subqueryscan_references(root,
+ (SubqueryScan *) plan,
+ rtoffset);
+ case T_FunctionScan:
+ {
+ FunctionScan *splan = (FunctionScan *) plan;
+
+ splan->scan.scanrelid += rtoffset;
+ splan->scan.plan.targetlist =
+ fix_scan_list(root, splan->scan.plan.targetlist,
+ rtoffset, NUM_EXEC_TLIST(plan));
+ splan->scan.plan.qual =
+ fix_scan_list(root, splan->scan.plan.qual,
+ rtoffset, NUM_EXEC_QUAL(plan));
+ splan->functions =
+ fix_scan_list(root, splan->functions, rtoffset, 1);
+ }
+ break;
+ case T_TableFuncScan:
+ {
+ TableFuncScan *splan = (TableFuncScan *) plan;
+
+ splan->scan.scanrelid += rtoffset;
+ splan->scan.plan.targetlist =
+ fix_scan_list(root, splan->scan.plan.targetlist,
+ rtoffset, NUM_EXEC_TLIST(plan));
+ splan->scan.plan.qual =
+ fix_scan_list(root, splan->scan.plan.qual,
+ rtoffset, NUM_EXEC_QUAL(plan));
+ splan->tablefunc = (TableFunc *)
+ fix_scan_expr(root, (Node *) splan->tablefunc,
+ rtoffset, 1);
+ }
+ break;
+ case T_ValuesScan:
+ {
+ ValuesScan *splan = (ValuesScan *) plan;
+
+ splan->scan.scanrelid += rtoffset;
+ splan->scan.plan.targetlist =
+ fix_scan_list(root, splan->scan.plan.targetlist,
+ rtoffset, NUM_EXEC_TLIST(plan));
+ splan->scan.plan.qual =
+ fix_scan_list(root, splan->scan.plan.qual,
+ rtoffset, NUM_EXEC_QUAL(plan));
+ splan->values_lists =
+ fix_scan_list(root, splan->values_lists,
+ rtoffset, 1);
+ }
+ break;
+ case T_CteScan:
+ {
+ CteScan *splan = (CteScan *) plan;
+
+ splan->scan.scanrelid += rtoffset;
+ splan->scan.plan.targetlist =
+ fix_scan_list(root, splan->scan.plan.targetlist,
+ rtoffset, NUM_EXEC_TLIST(plan));
+ splan->scan.plan.qual =
+ fix_scan_list(root, splan->scan.plan.qual,
+ rtoffset, NUM_EXEC_QUAL(plan));
+ }
+ break;
+ case T_NamedTuplestoreScan:
+ {
+ NamedTuplestoreScan *splan = (NamedTuplestoreScan *) plan;
+
+ splan->scan.scanrelid += rtoffset;
+ splan->scan.plan.targetlist =
+ fix_scan_list(root, splan->scan.plan.targetlist,
+ rtoffset, NUM_EXEC_TLIST(plan));
+ splan->scan.plan.qual =
+ fix_scan_list(root, splan->scan.plan.qual,
+ rtoffset, NUM_EXEC_QUAL(plan));
+ }
+ break;
+ case T_WorkTableScan:
+ {
+ WorkTableScan *splan = (WorkTableScan *) plan;
+
+ splan->scan.scanrelid += rtoffset;
+ splan->scan.plan.targetlist =
+ fix_scan_list(root, splan->scan.plan.targetlist,
+ rtoffset, NUM_EXEC_TLIST(plan));
+ splan->scan.plan.qual =
+ fix_scan_list(root, splan->scan.plan.qual,
+ rtoffset, NUM_EXEC_QUAL(plan));
+ }
+ break;
+ case T_ForeignScan:
+ set_foreignscan_references(root, (ForeignScan *) plan, rtoffset);
+ break;
+ case T_CustomScan:
+ set_customscan_references(root, (CustomScan *) plan, rtoffset);
+ break;
+
+ case T_NestLoop:
+ case T_MergeJoin:
+ case T_HashJoin:
+ set_join_references(root, (Join *) plan, rtoffset);
+ break;
+
+ case T_Gather:
+ case T_GatherMerge:
+ {
+ set_upper_references(root, plan, rtoffset);
+ set_param_references(root, plan);
+ }
+ break;
+
+ case T_Hash:
+ set_hash_references(root, plan, rtoffset);
+ break;
+
+ case T_Memoize:
+ {
+ Memoize *mplan = (Memoize *) plan;
+
+ /*
+ * Memoize does not evaluate its targetlist. It just uses the
+ * same targetlist from its outer subnode.
+ */
+ set_dummy_tlist_references(plan, rtoffset);
+
+ mplan->param_exprs = fix_scan_list(root, mplan->param_exprs,
+ rtoffset,
+ NUM_EXEC_TLIST(plan));
+ break;
+ }
+
+ case T_Material:
+ case T_Sort:
+ case T_IncrementalSort:
+ case T_Unique:
+ case T_SetOp:
+
+ /*
+ * These plan types don't actually bother to evaluate their
+ * targetlists, because they just return their unmodified input
+ * tuples. Even though the targetlist won't be used by the
+ * executor, we fix it up for possible use by EXPLAIN (not to
+ * mention ease of debugging --- wrong varnos are very confusing).
+ */
+ set_dummy_tlist_references(plan, rtoffset);
+
+ /*
+ * Since these plan types don't check quals either, we should not
+ * find any qual expression attached to them.
+ */
+ Assert(plan->qual == NIL);
+ break;
+ case T_LockRows:
+ {
+ LockRows *splan = (LockRows *) plan;
+
+ /*
+ * Like the plan types above, LockRows doesn't evaluate its
+ * tlist or quals. But we have to fix up the RT indexes in
+ * its rowmarks.
+ */
+ set_dummy_tlist_references(plan, rtoffset);
+ Assert(splan->plan.qual == NIL);
+
+ foreach(l, splan->rowMarks)
+ {
+ PlanRowMark *rc = (PlanRowMark *) lfirst(l);
+
+ rc->rti += rtoffset;
+ rc->prti += rtoffset;
+ }
+ }
+ break;
+ case T_Limit:
+ {
+ Limit *splan = (Limit *) plan;
+
+ /*
+ * Like the plan types above, Limit doesn't evaluate its tlist
+ * or quals. It does have live expressions for limit/offset,
+ * however; and those cannot contain subplan variable refs, so
+ * fix_scan_expr works for them.
+ */
+ set_dummy_tlist_references(plan, rtoffset);
+ Assert(splan->plan.qual == NIL);
+
+ splan->limitOffset =
+ fix_scan_expr(root, splan->limitOffset, rtoffset, 1);
+ splan->limitCount =
+ fix_scan_expr(root, splan->limitCount, rtoffset, 1);
+ }
+ break;
+ case T_Agg:
+ {
+ Agg *agg = (Agg *) plan;
+
+ /*
+ * If this node is combining partial-aggregation results, we
+ * must convert its Aggrefs to contain references to the
+ * partial-aggregate subexpressions that will be available
+ * from the child plan node.
+ */
+ if (DO_AGGSPLIT_COMBINE(agg->aggsplit))
+ {
+ plan->targetlist = (List *)
+ convert_combining_aggrefs((Node *) plan->targetlist,
+ NULL);
+ plan->qual = (List *)
+ convert_combining_aggrefs((Node *) plan->qual,
+ NULL);
+ }
+
+ set_upper_references(root, plan, rtoffset);
+ }
+ break;
+ case T_Group:
+ set_upper_references(root, plan, rtoffset);
+ break;
+ case T_WindowAgg:
+ {
+ WindowAgg *wplan = (WindowAgg *) plan;
+
+ /*
+				 * Adjust the WindowAgg's run conditions by swapping out the
+				 * WindowFunc references to instead reference the Var in the
+				 * scan slot, so that when the executor evaluates the
+				 * runCondition it reads the WindowFunc's value from the slot
+				 * the result has just been stored into rather than evaluating
+				 * the WindowFunc all over again.
+ */
+ wplan->runCondition = set_windowagg_runcondition_references(root,
+ wplan->runCondition,
+ (Plan *) wplan);
+
+ set_upper_references(root, plan, rtoffset);
+
+ /*
+ * Like Limit node limit/offset expressions, WindowAgg has
+ * frame offset expressions, which cannot contain subplan
+ * variable refs, so fix_scan_expr works for them.
+ */
+ wplan->startOffset =
+ fix_scan_expr(root, wplan->startOffset, rtoffset, 1);
+ wplan->endOffset =
+ fix_scan_expr(root, wplan->endOffset, rtoffset, 1);
+ wplan->runCondition = fix_scan_list(root,
+ wplan->runCondition,
+ rtoffset,
+ NUM_EXEC_TLIST(plan));
+ wplan->runConditionOrig = fix_scan_list(root,
+ wplan->runConditionOrig,
+ rtoffset,
+ NUM_EXEC_TLIST(plan));
+ }
+ break;
+ case T_Result:
+ {
+ Result *splan = (Result *) plan;
+
+ /*
+ * Result may or may not have a subplan; if not, it's more
+ * like a scan node than an upper node.
+ */
+ if (splan->plan.lefttree != NULL)
+ set_upper_references(root, plan, rtoffset);
+ else
+ {
+ /*
+ * The tlist of a childless Result could contain
+ * unresolved ROWID_VAR Vars, in case it's representing a
+ * target relation which is completely empty because of
+ * constraint exclusion. Replace any such Vars by null
+ * constants, as though they'd been resolved for a leaf
+ * scan node that doesn't support them. We could have
+ * fix_scan_expr do this, but since the case is only
+ * expected to occur here, it seems safer to special-case
+ * it here and keep the assertions that ROWID_VARs
+ * shouldn't be seen by fix_scan_expr.
+ */
+ foreach(l, splan->plan.targetlist)
+ {
+ TargetEntry *tle = (TargetEntry *) lfirst(l);
+ Var *var = (Var *) tle->expr;
+
+ if (var && IsA(var, Var) && var->varno == ROWID_VAR)
+ tle->expr = (Expr *) makeNullConst(var->vartype,
+ var->vartypmod,
+ var->varcollid);
+ }
+
+ splan->plan.targetlist =
+ fix_scan_list(root, splan->plan.targetlist,
+ rtoffset, NUM_EXEC_TLIST(plan));
+ splan->plan.qual =
+ fix_scan_list(root, splan->plan.qual,
+ rtoffset, NUM_EXEC_QUAL(plan));
+ }
+ /* resconstantqual can't contain any subplan variable refs */
+ splan->resconstantqual =
+ fix_scan_expr(root, splan->resconstantqual, rtoffset, 1);
+ }
+ break;
+ case T_ProjectSet:
+ set_upper_references(root, plan, rtoffset);
+ break;
+ case T_ModifyTable:
+ {
+ ModifyTable *splan = (ModifyTable *) plan;
+ Plan *subplan = outerPlan(splan);
+
+ Assert(splan->plan.targetlist == NIL);
+ Assert(splan->plan.qual == NIL);
+
+ splan->withCheckOptionLists =
+ fix_scan_list(root, splan->withCheckOptionLists,
+ rtoffset, 1);
+
+ if (splan->returningLists)
+ {
+ List *newRL = NIL;
+ ListCell *lcrl,
+ *lcrr;
+
+ /*
+ * Pass each per-resultrel returningList through
+ * set_returning_clause_references().
+ */
+ Assert(list_length(splan->returningLists) == list_length(splan->resultRelations));
+ forboth(lcrl, splan->returningLists,
+ lcrr, splan->resultRelations)
+ {
+ List *rlist = (List *) lfirst(lcrl);
+ Index resultrel = lfirst_int(lcrr);
+
+ rlist = set_returning_clause_references(root,
+ rlist,
+ subplan,
+ resultrel,
+ rtoffset);
+ newRL = lappend(newRL, rlist);
+ }
+ splan->returningLists = newRL;
+
+ /*
+ * Set up the visible plan targetlist as being the same as
+ * the first RETURNING list. This is for the use of
+ * EXPLAIN; the executor won't pay any attention to the
+ * targetlist. We postpone this step until here so that
+ * we don't have to do set_returning_clause_references()
+ * twice on identical targetlists.
+ */
+ splan->plan.targetlist = copyObject(linitial(newRL));
+ }
+
+ /*
+ * We treat ModifyTable with ON CONFLICT as a form of 'pseudo
+ * join', where the inner side is the EXCLUDED tuple.
+				 * Therefore use fix_join_expr to set up the relevant variables
+				 * to INNER_VAR.  We explicitly don't create any OUTER_VARs, as
+ * those are already used by RETURNING and it seems better to
+ * be non-conflicting.
+ */
+ if (splan->onConflictSet)
+ {
+ indexed_tlist *itlist;
+
+ itlist = build_tlist_index(splan->exclRelTlist);
+
+ splan->onConflictSet =
+ fix_join_expr(root, splan->onConflictSet,
+ NULL, itlist,
+ linitial_int(splan->resultRelations),
+ rtoffset, NUM_EXEC_QUAL(plan));
+
+ splan->onConflictWhere = (Node *)
+ fix_join_expr(root, (List *) splan->onConflictWhere,
+ NULL, itlist,
+ linitial_int(splan->resultRelations),
+ rtoffset, NUM_EXEC_QUAL(plan));
+
+ pfree(itlist);
+
+ splan->exclRelTlist =
+ fix_scan_list(root, splan->exclRelTlist, rtoffset, 1);
+ }
+
+ /*
+ * The MERGE statement produces the target rows by performing
+ * a right join between the target relation and the source
+ * relation (which could be a plain relation or a subquery).
+ * The INSERT and UPDATE actions of the MERGE statement
+ * require access to the columns from the source relation. We
+ * arrange things so that the source relation attributes are
+ * available as INNER_VAR and the target relation attributes
+ * are available from the scan tuple.
+ */
+ if (splan->mergeActionLists != NIL)
+ {
+ ListCell *lca,
+ *lcr;
+
+ /*
+ * Fix the targetList of individual action nodes so that
+ * the so-called "source relation" Vars are referenced as
+ * INNER_VAR. Note that for this to work correctly during
+ * execution, the ecxt_innertuple must be set to the tuple
+ * obtained by executing the subplan, which is what
+ * constitutes the "source relation".
+ *
+					 * We leave the Vars from the result relation (i.e., the
+					 * target relation) unchanged, meaning those Vars are
+					 * picked up from the scan slot.  So during execution, we
+					 * must ensure that ecxt_scantuple is set up correctly to
+					 * refer to the tuple from the target relation.
+ */
+ indexed_tlist *itlist;
+
+ itlist = build_tlist_index(subplan->targetlist);
+
+ forboth(lca, splan->mergeActionLists,
+ lcr, splan->resultRelations)
+ {
+ List *mergeActionList = lfirst(lca);
+ Index resultrel = lfirst_int(lcr);
+
+ foreach(l, mergeActionList)
+ {
+ MergeAction *action = (MergeAction *) lfirst(l);
+
+ /* Fix targetList of each action. */
+ action->targetList = fix_join_expr(root,
+ action->targetList,
+ NULL, itlist,
+ resultrel,
+ rtoffset,
+ NUM_EXEC_TLIST(plan));
+
+ /* Fix quals too. */
+ action->qual = (Node *) fix_join_expr(root,
+ (List *) action->qual,
+ NULL, itlist,
+ resultrel,
+ rtoffset,
+ NUM_EXEC_QUAL(plan));
+ }
+ }
+ }
+
+ splan->nominalRelation += rtoffset;
+ if (splan->rootRelation)
+ splan->rootRelation += rtoffset;
+ splan->exclRelRTI += rtoffset;
+
+ foreach(l, splan->resultRelations)
+ {
+ lfirst_int(l) += rtoffset;
+ }
+ foreach(l, splan->rowMarks)
+ {
+ PlanRowMark *rc = (PlanRowMark *) lfirst(l);
+
+ rc->rti += rtoffset;
+ rc->prti += rtoffset;
+ }
+
+ /*
+ * Append this ModifyTable node's final result relation RT
+ * index(es) to the global list for the plan.
+ */
+ root->glob->resultRelations =
+ list_concat(root->glob->resultRelations,
+ splan->resultRelations);
+ if (splan->rootRelation)
+ {
+ root->glob->resultRelations =
+ lappend_int(root->glob->resultRelations,
+ splan->rootRelation);
+ }
+ }
+ break;
+ case T_Append:
+ /* Needs special treatment, see comments below */
+ return set_append_references(root,
+ (Append *) plan,
+ rtoffset);
+ case T_MergeAppend:
+ /* Needs special treatment, see comments below */
+ return set_mergeappend_references(root,
+ (MergeAppend *) plan,
+ rtoffset);
+ case T_RecursiveUnion:
+ /* This doesn't evaluate targetlist or check quals either */
+ set_dummy_tlist_references(plan, rtoffset);
+ Assert(plan->qual == NIL);
+ break;
+ case T_BitmapAnd:
+ {
+ BitmapAnd *splan = (BitmapAnd *) plan;
+
+ /* BitmapAnd works like Append, but has no tlist */
+ Assert(splan->plan.targetlist == NIL);
+ Assert(splan->plan.qual == NIL);
+ foreach(l, splan->bitmapplans)
+ {
+ lfirst(l) = set_plan_refs(root,
+ (Plan *) lfirst(l),
+ rtoffset);
+ }
+ }
+ break;
+ case T_BitmapOr:
+ {
+ BitmapOr *splan = (BitmapOr *) plan;
+
+ /* BitmapOr works like Append, but has no tlist */
+ Assert(splan->plan.targetlist == NIL);
+ Assert(splan->plan.qual == NIL);
+ foreach(l, splan->bitmapplans)
+ {
+ lfirst(l) = set_plan_refs(root,
+ (Plan *) lfirst(l),
+ rtoffset);
+ }
+ }
+ break;
+ default:
+ elog(ERROR, "unrecognized node type: %d",
+ (int) nodeTag(plan));
+ break;
+ }
+
+ /*
+ * Now recurse into child plans, if any
+ *
+ * NOTE: it is essential that we recurse into child plans AFTER we set
+ * subplan references in this plan's tlist and quals. If we did the
+ * reference-adjustments bottom-up, then we would fail to match this
+ * plan's var nodes against the already-modified nodes of the children.
+ */
+ plan->lefttree = set_plan_refs(root, plan->lefttree, rtoffset);
+ plan->righttree = set_plan_refs(root, plan->righttree, rtoffset);
+
+ return plan;
+}
+
+/*
+ * set_indexonlyscan_references
+ * Do set_plan_references processing on an IndexOnlyScan
+ *
+ * This is unlike the handling of a plain IndexScan because we have to
+ * convert Vars referencing the heap into Vars referencing the index.
+ * We can use the fix_upper_expr machinery for that, by working from a
+ * targetlist describing the index columns.
+ */
+static Plan *
+set_indexonlyscan_references(PlannerInfo *root,
+ IndexOnlyScan *plan,
+ int rtoffset)
+{
+ indexed_tlist *index_itlist;
+ List *stripped_indextlist;
+ ListCell *lc;
+
+ /*
+ * Vars in the plan node's targetlist, qual, and recheckqual must only
+ * reference columns that the index AM can actually return. To ensure
+ * this, remove non-returnable columns (which are marked as resjunk) from
+ * the indexed tlist. We can just drop them because the indexed_tlist
+ * machinery pays attention to TLE resnos, not physical list position.
+ */
+ stripped_indextlist = NIL;
+ foreach(lc, plan->indextlist)
+ {
+ TargetEntry *indextle = (TargetEntry *) lfirst(lc);
+
+ if (!indextle->resjunk)
+ stripped_indextlist = lappend(stripped_indextlist, indextle);
+ }
+
+ index_itlist = build_tlist_index(stripped_indextlist);
+
+ plan->scan.scanrelid += rtoffset;
+ plan->scan.plan.targetlist = (List *)
+ fix_upper_expr(root,
+ (Node *) plan->scan.plan.targetlist,
+ index_itlist,
+ INDEX_VAR,
+ rtoffset,
+ NUM_EXEC_TLIST((Plan *) plan));
+ plan->scan.plan.qual = (List *)
+ fix_upper_expr(root,
+ (Node *) plan->scan.plan.qual,
+ index_itlist,
+ INDEX_VAR,
+ rtoffset,
+ NUM_EXEC_QUAL((Plan *) plan));
+ plan->recheckqual = (List *)
+ fix_upper_expr(root,
+ (Node *) plan->recheckqual,
+ index_itlist,
+ INDEX_VAR,
+ rtoffset,
+ NUM_EXEC_QUAL((Plan *) plan));
+ /* indexqual is already transformed to reference index columns */
+ plan->indexqual = fix_scan_list(root, plan->indexqual,
+ rtoffset, 1);
+ /* indexorderby is already transformed to reference index columns */
+ plan->indexorderby = fix_scan_list(root, plan->indexorderby,
+ rtoffset, 1);
+ /* indextlist must NOT be transformed to reference index columns */
+ plan->indextlist = fix_scan_list(root, plan->indextlist,
+ rtoffset, NUM_EXEC_TLIST((Plan *) plan));
+
+ pfree(index_itlist);
+
+ return (Plan *) plan;
+}
+
+/*
+ * set_subqueryscan_references
+ * Do set_plan_references processing on a SubqueryScan
+ *
+ * We try to strip out the SubqueryScan entirely; if we can't, we have
+ * to do the normal processing on it.
+ */
+static Plan *
+set_subqueryscan_references(PlannerInfo *root,
+ SubqueryScan *plan,
+ int rtoffset)
+{
+ RelOptInfo *rel;
+ Plan *result;
+
+ /* Need to look up the subquery's RelOptInfo, since we need its subroot */
+ rel = find_base_rel(root, plan->scan.scanrelid);
+
+ /* Recursively process the subplan */
+ plan->subplan = set_plan_references(rel->subroot, plan->subplan);
+
+ if (trivial_subqueryscan(plan))
+ {
+ /*
+ * We can omit the SubqueryScan node and just pull up the subplan.
+ */
+ result = clean_up_removed_plan_level((Plan *) plan, plan->subplan);
+ }
+ else
+ {
+ /*
+ * Keep the SubqueryScan node. We have to do the processing that
+ * set_plan_references would otherwise have done on it. Notice we do
+ * not do set_upper_references() here, because a SubqueryScan will
+ * always have been created with correct references to its subplan's
+ * outputs to begin with.
+ */
+ plan->scan.scanrelid += rtoffset;
+ plan->scan.plan.targetlist =
+ fix_scan_list(root, plan->scan.plan.targetlist,
+ rtoffset, NUM_EXEC_TLIST((Plan *) plan));
+ plan->scan.plan.qual =
+ fix_scan_list(root, plan->scan.plan.qual,
+ rtoffset, NUM_EXEC_QUAL((Plan *) plan));
+
+ result = (Plan *) plan;
+ }
+
+ return result;
+}
+
+/*
+ * trivial_subqueryscan
+ * Detect whether a SubqueryScan can be deleted from the plan tree.
+ *
+ * We can delete it if it has no qual to check and the targetlist just
+ * regurgitates the output of the child plan.
+ *
+ * This can be called from mark_async_capable_plan(), a helper function for
+ * create_append_plan(), before set_subqueryscan_references(), to determine
+ * triviality of a SubqueryScan that is a child of an Append node. So we
+ * cache the result in the SubqueryScan node to avoid repeated computation.
+ *
+ * Note: when called from mark_async_capable_plan(), we determine the result
+ * before running finalize_plan() on the SubqueryScan node (if needed) and
+ * set_plan_references() on the subplan tree, but this would be safe, because
+ * 1) finalize_plan() doesn't modify the tlist or quals for the SubqueryScan
+ * node (or that for any plan node in the subplan tree), and
+ * 2) set_plan_references() modifies the tlist for every plan node in the
+ * subplan tree, but keeps const/resjunk columns as const/resjunk ones and
+ * preserves the length and order of the tlist, and
+ * 3) set_plan_references() might delete the topmost plan node like an Append
+ * or MergeAppend from the subplan tree and pull up the child plan node,
+ * but in that case, the tlist for the child plan node exactly matches the
+ * parent.
+ */
+bool
+trivial_subqueryscan(SubqueryScan *plan)
+{
+ int attrno;
+ ListCell *lp,
+ *lc;
+
+	/* We might have detected this already; if so, reuse the result */
+ if (plan->scanstatus == SUBQUERY_SCAN_TRIVIAL)
+ return true;
+ if (plan->scanstatus == SUBQUERY_SCAN_NONTRIVIAL)
+ return false;
+ Assert(plan->scanstatus == SUBQUERY_SCAN_UNKNOWN);
+ /* Initially, mark the SubqueryScan as non-deletable from the plan tree */
+ plan->scanstatus = SUBQUERY_SCAN_NONTRIVIAL;
+
+ if (plan->scan.plan.qual != NIL)
+ return false;
+
+ if (list_length(plan->scan.plan.targetlist) !=
+ list_length(plan->subplan->targetlist))
+ return false; /* tlists not same length */
+
+ attrno = 1;
+ forboth(lp, plan->scan.plan.targetlist, lc, plan->subplan->targetlist)
+ {
+ TargetEntry *ptle = (TargetEntry *) lfirst(lp);
+ TargetEntry *ctle = (TargetEntry *) lfirst(lc);
+
+ if (ptle->resjunk != ctle->resjunk)
+ return false; /* tlist doesn't match junk status */
+
+ /*
+ * We accept either a Var referencing the corresponding element of the
+ * subplan tlist, or a Const equaling the subplan element. See
+ * generate_setop_tlist() for motivation.
+ */
+ if (ptle->expr && IsA(ptle->expr, Var))
+ {
+ Var *var = (Var *) ptle->expr;
+
+ Assert(var->varno == plan->scan.scanrelid);
+ Assert(var->varlevelsup == 0);
+ if (var->varattno != attrno)
+ return false; /* out of order */
+ }
+ else if (ptle->expr && IsA(ptle->expr, Const))
+ {
+ if (!equal(ptle->expr, ctle->expr))
+ return false;
+ }
+ else
+ return false;
+
+ attrno++;
+ }
+
+ /* Re-mark the SubqueryScan as deletable from the plan tree */
+ plan->scanstatus = SUBQUERY_SCAN_TRIVIAL;
+
+ return true;
+}
+
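+/*
+ * Illustrative sketch (editor's note, not part of the original file).
+ * Assuming a SubqueryScan whose subplan returns two columns (a, b), the
+ * triviality test above accepts a targetlist that merely re-emits those
+ * columns in order, e.g.
+ *
+ *		{Var(varno = scanrelid, varattno = 1),   <- matches subplan column 1
+ *		 Var(varno = scanrelid, varattno = 2)}   <- matches subplan column 2
+ *
+ * and rejects one that reorders, drops, or computes on them, e.g.
+ *
+ *		{Var(varno = scanrelid, varattno = 2),   <- out of order
+ *		 OpExpr(a + 1)}                          <- neither a Var nor a Const
+ *
+ * In the latter case the SubqueryScan must be kept so that it can project.
+ */
+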
+/*
+ * clean_up_removed_plan_level
+ * Do necessary cleanup when we strip out a SubqueryScan, Append, etc
+ *
+ * We are dropping the "parent" plan in favor of returning just its "child".
+ * A few small tweaks are needed.
+ */
+static Plan *
+clean_up_removed_plan_level(Plan *parent, Plan *child)
+{
+ /*
+ * We have to be sure we don't lose any initplans, so move any that were
+ * attached to the parent plan to the child. If we do move any, the child
+ * is no longer parallel-safe.
+ */
+ if (parent->initPlan)
+ child->parallel_safe = false;
+
+ /*
+ * Attach plans this way so that parent's initplans are processed before
+ * any pre-existing initplans of the child. Probably doesn't matter, but
+ * let's preserve the ordering just in case.
+ */
+ child->initPlan = list_concat(parent->initPlan,
+ child->initPlan);
+
+ /*
+ * We also have to transfer the parent's column labeling info into the
+ * child, else columns sent to client will be improperly labeled if this
+ * is the topmost plan level. resjunk and so on may be important too.
+ */
+ apply_tlist_labeling(child->targetlist, parent->targetlist);
+
+ return child;
+}
+
+/*
+ * set_foreignscan_references
+ * Do set_plan_references processing on a ForeignScan
+ */
+static void
+set_foreignscan_references(PlannerInfo *root,
+ ForeignScan *fscan,
+ int rtoffset)
+{
+ /* Adjust scanrelid if it's valid */
+ if (fscan->scan.scanrelid > 0)
+ fscan->scan.scanrelid += rtoffset;
+
+ if (fscan->fdw_scan_tlist != NIL || fscan->scan.scanrelid == 0)
+ {
+ /*
+ * Adjust tlist, qual, fdw_exprs, fdw_recheck_quals to reference
+ * foreign scan tuple
+ */
+ indexed_tlist *itlist = build_tlist_index(fscan->fdw_scan_tlist);
+
+ fscan->scan.plan.targetlist = (List *)
+ fix_upper_expr(root,
+ (Node *) fscan->scan.plan.targetlist,
+ itlist,
+ INDEX_VAR,
+ rtoffset,
+ NUM_EXEC_TLIST((Plan *) fscan));
+ fscan->scan.plan.qual = (List *)
+ fix_upper_expr(root,
+ (Node *) fscan->scan.plan.qual,
+ itlist,
+ INDEX_VAR,
+ rtoffset,
+ NUM_EXEC_QUAL((Plan *) fscan));
+ fscan->fdw_exprs = (List *)
+ fix_upper_expr(root,
+ (Node *) fscan->fdw_exprs,
+ itlist,
+ INDEX_VAR,
+ rtoffset,
+ NUM_EXEC_QUAL((Plan *) fscan));
+ fscan->fdw_recheck_quals = (List *)
+ fix_upper_expr(root,
+ (Node *) fscan->fdw_recheck_quals,
+ itlist,
+ INDEX_VAR,
+ rtoffset,
+ NUM_EXEC_QUAL((Plan *) fscan));
+ pfree(itlist);
+ /* fdw_scan_tlist itself just needs fix_scan_list() adjustments */
+ fscan->fdw_scan_tlist =
+ fix_scan_list(root, fscan->fdw_scan_tlist,
+ rtoffset, NUM_EXEC_TLIST((Plan *) fscan));
+ }
+ else
+ {
+ /*
+ * Adjust tlist, qual, fdw_exprs, fdw_recheck_quals in the standard
+ * way
+ */
+ fscan->scan.plan.targetlist =
+ fix_scan_list(root, fscan->scan.plan.targetlist,
+ rtoffset, NUM_EXEC_TLIST((Plan *) fscan));
+ fscan->scan.plan.qual =
+ fix_scan_list(root, fscan->scan.plan.qual,
+ rtoffset, NUM_EXEC_QUAL((Plan *) fscan));
+ fscan->fdw_exprs =
+ fix_scan_list(root, fscan->fdw_exprs,
+ rtoffset, NUM_EXEC_QUAL((Plan *) fscan));
+ fscan->fdw_recheck_quals =
+ fix_scan_list(root, fscan->fdw_recheck_quals,
+ rtoffset, NUM_EXEC_QUAL((Plan *) fscan));
+ }
+
+ fscan->fs_relids = offset_relid_set(fscan->fs_relids, rtoffset);
+
+ /* Adjust resultRelation if it's valid */
+ if (fscan->resultRelation > 0)
+ fscan->resultRelation += rtoffset;
+}
+
+/*
+ * set_customscan_references
+ * Do set_plan_references processing on a CustomScan
+ */
+static void
+set_customscan_references(PlannerInfo *root,
+ CustomScan *cscan,
+ int rtoffset)
+{
+ ListCell *lc;
+
+ /* Adjust scanrelid if it's valid */
+ if (cscan->scan.scanrelid > 0)
+ cscan->scan.scanrelid += rtoffset;
+
+ if (cscan->custom_scan_tlist != NIL || cscan->scan.scanrelid == 0)
+ {
+ /* Adjust tlist, qual, custom_exprs to reference custom scan tuple */
+ indexed_tlist *itlist = build_tlist_index(cscan->custom_scan_tlist);
+
+ cscan->scan.plan.targetlist = (List *)
+ fix_upper_expr(root,
+ (Node *) cscan->scan.plan.targetlist,
+ itlist,
+ INDEX_VAR,
+ rtoffset,
+ NUM_EXEC_TLIST((Plan *) cscan));
+ cscan->scan.plan.qual = (List *)
+ fix_upper_expr(root,
+ (Node *) cscan->scan.plan.qual,
+ itlist,
+ INDEX_VAR,
+ rtoffset,
+ NUM_EXEC_QUAL((Plan *) cscan));
+ cscan->custom_exprs = (List *)
+ fix_upper_expr(root,
+ (Node *) cscan->custom_exprs,
+ itlist,
+ INDEX_VAR,
+ rtoffset,
+ NUM_EXEC_QUAL((Plan *) cscan));
+ pfree(itlist);
+ /* custom_scan_tlist itself just needs fix_scan_list() adjustments */
+ cscan->custom_scan_tlist =
+ fix_scan_list(root, cscan->custom_scan_tlist,
+ rtoffset, NUM_EXEC_TLIST((Plan *) cscan));
+ }
+ else
+ {
+ /* Adjust tlist, qual, custom_exprs in the standard way */
+ cscan->scan.plan.targetlist =
+ fix_scan_list(root, cscan->scan.plan.targetlist,
+ rtoffset, NUM_EXEC_TLIST((Plan *) cscan));
+ cscan->scan.plan.qual =
+ fix_scan_list(root, cscan->scan.plan.qual,
+ rtoffset, NUM_EXEC_QUAL((Plan *) cscan));
+ cscan->custom_exprs =
+ fix_scan_list(root, cscan->custom_exprs,
+ rtoffset, NUM_EXEC_QUAL((Plan *) cscan));
+ }
+
+ /* Adjust child plan-nodes recursively, if needed */
+ foreach(lc, cscan->custom_plans)
+ {
+ lfirst(lc) = set_plan_refs(root, (Plan *) lfirst(lc), rtoffset);
+ }
+
+ cscan->custom_relids = offset_relid_set(cscan->custom_relids, rtoffset);
+}
+
+/*
+ * set_append_references
+ * Do set_plan_references processing on an Append
+ *
+ * We try to strip out the Append entirely; if we can't, we have
+ * to do the normal processing on it.
+ */
+static Plan *
+set_append_references(PlannerInfo *root,
+ Append *aplan,
+ int rtoffset)
+{
+ ListCell *l;
+
+ /*
+ * Append, like Sort et al, doesn't actually evaluate its targetlist or
+ * check quals. If it's got exactly one child plan, then it's not doing
+ * anything useful at all, and we can strip it out.
+ */
+ Assert(aplan->plan.qual == NIL);
+
+ /* First, we gotta recurse on the children */
+ foreach(l, aplan->appendplans)
+ {
+ lfirst(l) = set_plan_refs(root, (Plan *) lfirst(l), rtoffset);
+ }
+
+ /*
+ * See if it's safe to get rid of the Append entirely. For this to be
+ * safe, there must be only one child plan and that child plan's parallel
+	 * awareness must match that of the Append.  The reason for the latter is
+	 * that if the Append is parallel aware and the child is not, then the
+	 * calling plan may execute the non-parallel-aware child multiple times.
+ */
+ if (list_length(aplan->appendplans) == 1 &&
+ ((Plan *) linitial(aplan->appendplans))->parallel_aware == aplan->plan.parallel_aware)
+ return clean_up_removed_plan_level((Plan *) aplan,
+ (Plan *) linitial(aplan->appendplans));
+
+ /*
+ * Otherwise, clean up the Append as needed. It's okay to do this after
+ * recursing to the children, because set_dummy_tlist_references doesn't
+ * look at those.
+ */
+ set_dummy_tlist_references((Plan *) aplan, rtoffset);
+
+ aplan->apprelids = offset_relid_set(aplan->apprelids, rtoffset);
+
+ if (aplan->part_prune_info)
+ {
+ foreach(l, aplan->part_prune_info->prune_infos)
+ {
+ List *prune_infos = lfirst(l);
+ ListCell *l2;
+
+ foreach(l2, prune_infos)
+ {
+ PartitionedRelPruneInfo *pinfo = lfirst(l2);
+
+ pinfo->rtindex += rtoffset;
+ }
+ }
+ }
+
+ /* We don't need to recurse to lefttree or righttree ... */
+ Assert(aplan->plan.lefttree == NULL);
+ Assert(aplan->plan.righttree == NULL);
+
+ return (Plan *) aplan;
+}
+
+/*
+ * set_mergeappend_references
+ * Do set_plan_references processing on a MergeAppend
+ *
+ * We try to strip out the MergeAppend entirely; if we can't, we have
+ * to do the normal processing on it.
+ */
+static Plan *
+set_mergeappend_references(PlannerInfo *root,
+ MergeAppend *mplan,
+ int rtoffset)
+{
+ ListCell *l;
+
+ /*
+ * MergeAppend, like Sort et al, doesn't actually evaluate its targetlist
+ * or check quals. If it's got exactly one child plan, then it's not
+ * doing anything useful at all, and we can strip it out.
+ */
+ Assert(mplan->plan.qual == NIL);
+
+ /* First, we gotta recurse on the children */
+ foreach(l, mplan->mergeplans)
+ {
+ lfirst(l) = set_plan_refs(root, (Plan *) lfirst(l), rtoffset);
+ }
+
+ /*
+ * See if it's safe to get rid of the MergeAppend entirely. For this to
+ * be safe, there must be only one child plan and that child plan's
+	 * parallel awareness must match that of the MergeAppend.  The reason for
+	 * the latter is that if the MergeAppend is parallel aware and the child
+	 * is not, then the calling plan may execute the non-parallel-aware child
+	 * multiple times.
+ */
+ if (list_length(mplan->mergeplans) == 1 &&
+ ((Plan *) linitial(mplan->mergeplans))->parallel_aware == mplan->plan.parallel_aware)
+ return clean_up_removed_plan_level((Plan *) mplan,
+ (Plan *) linitial(mplan->mergeplans));
+
+ /*
+ * Otherwise, clean up the MergeAppend as needed. It's okay to do this
+ * after recursing to the children, because set_dummy_tlist_references
+ * doesn't look at those.
+ */
+ set_dummy_tlist_references((Plan *) mplan, rtoffset);
+
+ mplan->apprelids = offset_relid_set(mplan->apprelids, rtoffset);
+
+ if (mplan->part_prune_info)
+ {
+ foreach(l, mplan->part_prune_info->prune_infos)
+ {
+ List *prune_infos = lfirst(l);
+ ListCell *l2;
+
+ foreach(l2, prune_infos)
+ {
+ PartitionedRelPruneInfo *pinfo = lfirst(l2);
+
+ pinfo->rtindex += rtoffset;
+ }
+ }
+ }
+
+ /* We don't need to recurse to lefttree or righttree ... */
+ Assert(mplan->plan.lefttree == NULL);
+ Assert(mplan->plan.righttree == NULL);
+
+ return (Plan *) mplan;
+}
+
+/*
+ * set_hash_references
+ * Do set_plan_references processing on a Hash node
+ */
+static void
+set_hash_references(PlannerInfo *root, Plan *plan, int rtoffset)
+{
+ Hash *hplan = (Hash *) plan;
+ Plan *outer_plan = plan->lefttree;
+ indexed_tlist *outer_itlist;
+
+ /*
+ * Hash's hashkeys are used when feeding tuples into the hashtable,
+ * therefore have them reference Hash's outer plan (which itself is the
+ * inner plan of the HashJoin).
+ */
+ outer_itlist = build_tlist_index(outer_plan->targetlist);
+ hplan->hashkeys = (List *)
+ fix_upper_expr(root,
+ (Node *) hplan->hashkeys,
+ outer_itlist,
+ OUTER_VAR,
+ rtoffset,
+ NUM_EXEC_QUAL(plan));
+
+ /* Hash doesn't project */
+ set_dummy_tlist_references(plan, rtoffset);
+
+ /* Hash nodes don't have their own quals */
+ Assert(plan->qual == NIL);
+}
+
+/*
+ * offset_relid_set
+ * Apply rtoffset to the members of a Relids set.
+ */
+static Relids
+offset_relid_set(Relids relids, int rtoffset)
+{
+ Relids result = NULL;
+ int rtindex;
+
+ /* If there's no offset to apply, we needn't recompute the value */
+ if (rtoffset == 0)
+ return relids;
+ rtindex = -1;
+ while ((rtindex = bms_next_member(relids, rtindex)) >= 0)
+ result = bms_add_member(result, rtindex + rtoffset);
+ return result;
+}
+
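+/*
+ * Illustrative sketch (editor's note, not part of the original file).
+ * offset_relid_set() just shifts every member of the set; for example,
+ * with rtoffset = 10:
+ *
+ *		Relids r = bms_add_member(bms_add_member(NULL, 2), 5);   <- {2, 5}
+ *
+ *		r = offset_relid_set(r, 10);                              <- {12, 15}
+ *
+ * When rtoffset is zero the input set is returned unchanged, avoiding a
+ * useless copy.
+ */
+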
+/*
+ * copyVar
+ * Copy a Var node.
+ *
+ * fix_scan_expr and friends do this enough times that it's worth having
+ * a bespoke routine instead of using the generic copyObject() function.
+ */
+static inline Var *
+copyVar(Var *var)
+{
+ Var *newvar = (Var *) palloc(sizeof(Var));
+
+ *newvar = *var;
+ return newvar;
+}
+
+/*
+ * fix_expr_common
+ * Do generic set_plan_references processing on an expression node
+ *
+ * This is code that is common to all variants of expression-fixing.
+ * We must look up operator opcode info for OpExpr and related nodes,
+ * add OIDs from regclass Const nodes into root->glob->relationOids, and
+ * add PlanInvalItems for user-defined functions into root->glob->invalItems.
+ * We also fill in column index lists for GROUPING() expressions.
+ *
+ * We assume it's okay to update opcode info in-place. So this could possibly
+ * scribble on the planner's input data structures, but it's OK.
+ */
+static void
+fix_expr_common(PlannerInfo *root, Node *node)
+{
+ /* We assume callers won't call us on a NULL pointer */
+ if (IsA(node, Aggref))
+ {
+ record_plan_function_dependency(root,
+ ((Aggref *) node)->aggfnoid);
+ }
+ else if (IsA(node, WindowFunc))
+ {
+ record_plan_function_dependency(root,
+ ((WindowFunc *) node)->winfnoid);
+ }
+ else if (IsA(node, FuncExpr))
+ {
+ record_plan_function_dependency(root,
+ ((FuncExpr *) node)->funcid);
+ }
+ else if (IsA(node, OpExpr))
+ {
+ set_opfuncid((OpExpr *) node);
+ record_plan_function_dependency(root,
+ ((OpExpr *) node)->opfuncid);
+ }
+ else if (IsA(node, DistinctExpr))
+ {
+ set_opfuncid((OpExpr *) node); /* rely on struct equivalence */
+ record_plan_function_dependency(root,
+ ((DistinctExpr *) node)->opfuncid);
+ }
+ else if (IsA(node, NullIfExpr))
+ {
+ set_opfuncid((OpExpr *) node); /* rely on struct equivalence */
+ record_plan_function_dependency(root,
+ ((NullIfExpr *) node)->opfuncid);
+ }
+ else if (IsA(node, ScalarArrayOpExpr))
+ {
+ ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) node;
+
+ set_sa_opfuncid(saop);
+ record_plan_function_dependency(root, saop->opfuncid);
+
+ if (OidIsValid(saop->hashfuncid))
+ record_plan_function_dependency(root, saop->hashfuncid);
+
+ if (OidIsValid(saop->negfuncid))
+ record_plan_function_dependency(root, saop->negfuncid);
+ }
+ else if (IsA(node, Const))
+ {
+ Const *con = (Const *) node;
+
+ /* Check for regclass reference */
+ if (ISREGCLASSCONST(con))
+ root->glob->relationOids =
+ lappend_oid(root->glob->relationOids,
+ DatumGetObjectId(con->constvalue));
+ }
+ else if (IsA(node, GroupingFunc))
+ {
+ GroupingFunc *g = (GroupingFunc *) node;
+ AttrNumber *grouping_map = root->grouping_map;
+
+ /* If there are no grouping sets, we don't need this. */
+
+ Assert(grouping_map || g->cols == NIL);
+
+ if (grouping_map)
+ {
+ ListCell *lc;
+ List *cols = NIL;
+
+ foreach(lc, g->refs)
+ {
+ cols = lappend_int(cols, grouping_map[lfirst_int(lc)]);
+ }
+
+ Assert(!g->cols || equal(cols, g->cols));
+
+ if (!g->cols)
+ g->cols = cols;
+ }
+ }
+}
+
+/*
+ * fix_param_node
+ * Do set_plan_references processing on a Param
+ *
+ * If it's a PARAM_MULTIEXPR, replace it with the appropriate Param from
+ * root->multiexpr_params; otherwise no change is needed.
+ * Just for paranoia's sake, we make a copy of the node in either case.
+ */
+static Node *
+fix_param_node(PlannerInfo *root, Param *p)
+{
+ if (p->paramkind == PARAM_MULTIEXPR)
+ {
+ int subqueryid = p->paramid >> 16;
+ int colno = p->paramid & 0xFFFF;
+ List *params;
+
+ if (subqueryid <= 0 ||
+ subqueryid > list_length(root->multiexpr_params))
+ elog(ERROR, "unexpected PARAM_MULTIEXPR ID: %d", p->paramid);
+ params = (List *) list_nth(root->multiexpr_params, subqueryid - 1);
+ if (colno <= 0 || colno > list_length(params))
+ elog(ERROR, "unexpected PARAM_MULTIEXPR ID: %d", p->paramid);
+ return copyObject(list_nth(params, colno - 1));
+ }
+ return (Node *) copyObject(p);
+}
+
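+/*
+ * Illustrative sketch (editor's note, not part of the original file).
+ * A PARAM_MULTIEXPR paramid packs the 1-based MULTIEXPR subquery number
+ * into its high 16 bits and the 1-based output column number into its low
+ * 16 bits, so, for example:
+ *
+ *		int		paramid = (2 << 16) | 3;		<- subquery 2, column 3
+ *		int		subqueryid = paramid >> 16;		<- 2
+ *		int		colno = paramid & 0xFFFF;		<- 3
+ *
+ * fix_param_node() uses these to index into root->multiexpr_params and
+ * substitute the Param actually output by that MULTIEXPR subplan.
+ */
+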
+/*
+ * fix_alternative_subplan
+ * Do set_plan_references processing on an AlternativeSubPlan
+ *
+ * Choose one of the alternative implementations and return just that one,
+ * discarding the rest of the AlternativeSubPlan structure.
+ * Note: caller must still recurse into the result!
+ *
+ * We don't make any attempt to fix up cost estimates in the parent plan
+ * node or higher-level nodes.
+ */
+static Node *
+fix_alternative_subplan(PlannerInfo *root, AlternativeSubPlan *asplan,
+ double num_exec)
+{
+ SubPlan *bestplan = NULL;
+ Cost bestcost = 0;
+ ListCell *lc;
+
+ /*
+ * Compute the estimated cost of each subplan assuming num_exec
+	 * executions, and keep the cheapest one.  In the event of exact equality
+	 * of estimates, we prefer the later plan; this is a bit arbitrary, but in
+ * current usage it biases us to break ties against fast-start subplans.
+ */
+ Assert(asplan->subplans != NIL);
+
+ foreach(lc, asplan->subplans)
+ {
+ SubPlan *curplan = (SubPlan *) lfirst(lc);
+ Cost curcost;
+
+ curcost = curplan->startup_cost + num_exec * curplan->per_call_cost;
+ if (bestplan == NULL || curcost <= bestcost)
+ {
+ bestplan = curplan;
+ bestcost = curcost;
+ }
+
+ /* Also mark all subplans that are in AlternativeSubPlans */
+ root->isAltSubplan[curplan->plan_id - 1] = true;
+ }
+
+ /* Mark the subplan we selected */
+ root->isUsedSubplan[bestplan->plan_id - 1] = true;
+
+ return (Node *) bestplan;
+}
+
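+/*
+ * Illustrative sketch (editor's note, not part of the original file; the
+ * numbers are made up).  The selection above uses the linear cost model
+ *
+ *		total_cost = startup_cost + num_exec * per_call_cost
+ *
+ * so, for instance, a subplan costed at (1000, 0.01) beats one costed at
+ * (0, 50) once num_exec exceeds roughly 20:
+ *
+ *		1000 + 0.01 * 25 = 1000.25   <   0 + 50 * 25 = 1250
+ *
+ * illustrating that which alternative wins depends on the expected number
+ * of executions.
+ */
+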
+/*
+ * fix_scan_expr
+ * Do set_plan_references processing on a scan-level expression
+ *
+ * This consists of incrementing all Vars' varnos by rtoffset,
+ * replacing PARAM_MULTIEXPR Params, expanding PlaceHolderVars,
+ * replacing Aggref nodes that should be replaced by initplan output Params,
+ * choosing the best implementation for AlternativeSubPlans,
+ * looking up operator opcode info for OpExpr and related nodes,
+ * and adding OIDs from regclass Const nodes into root->glob->relationOids.
+ *
+ * 'node': the expression to be modified
+ * 'rtoffset': how much to increment varnos by
+ * 'num_exec': estimated number of executions of expression
+ *
+ * The expression tree is either copied-and-modified, or modified in-place
+ * if that seems safe.
+ */
+static Node *
+fix_scan_expr(PlannerInfo *root, Node *node, int rtoffset, double num_exec)
+{
+ fix_scan_expr_context context;
+
+ context.root = root;
+ context.rtoffset = rtoffset;
+ context.num_exec = num_exec;
+
+ if (rtoffset != 0 ||
+ root->multiexpr_params != NIL ||
+ root->glob->lastPHId != 0 ||
+ root->minmax_aggs != NIL ||
+ root->hasAlternativeSubPlans)
+ {
+ return fix_scan_expr_mutator(node, &context);
+ }
+ else
+ {
+ /*
+ * If rtoffset == 0, we don't need to change any Vars, and if there
+ * are no MULTIEXPR subqueries then we don't need to replace
+ * PARAM_MULTIEXPR Params, and if there are no placeholders anywhere
+ * we won't need to remove them, and if there are no minmax Aggrefs we
+ * won't need to replace them, and if there are no AlternativeSubPlans
+ * we won't need to remove them. Then it's OK to just scribble on the
+ * input node tree instead of copying (since the only change, filling
+ * in any unset opfuncid fields, is harmless). This saves just enough
+ * cycles to be noticeable on trivial queries.
+ */
+ (void) fix_scan_expr_walker(node, &context);
+ return node;
+ }
+}
+
+static Node *
+fix_scan_expr_mutator(Node *node, fix_scan_expr_context *context)
+{
+ if (node == NULL)
+ return NULL;
+ if (IsA(node, Var))
+ {
+ Var *var = copyVar((Var *) node);
+
+ Assert(var->varlevelsup == 0);
+
+ /*
+ * We should not see Vars marked INNER_VAR, OUTER_VAR, or ROWID_VAR.
+ * But an indexqual expression could contain INDEX_VAR Vars.
+ */
+ Assert(var->varno != INNER_VAR);
+ Assert(var->varno != OUTER_VAR);
+ Assert(var->varno != ROWID_VAR);
+ if (!IS_SPECIAL_VARNO(var->varno))
+ var->varno += context->rtoffset;
+ if (var->varnosyn > 0)
+ var->varnosyn += context->rtoffset;
+ return (Node *) var;
+ }
+ if (IsA(node, Param))
+ return fix_param_node(context->root, (Param *) node);
+ if (IsA(node, Aggref))
+ {
+ Aggref *aggref = (Aggref *) node;
+
+ /* See if the Aggref should be replaced by a Param */
+ if (context->root->minmax_aggs != NIL &&
+ list_length(aggref->args) == 1)
+ {
+ TargetEntry *curTarget = (TargetEntry *) linitial(aggref->args);
+ ListCell *lc;
+
+ foreach(lc, context->root->minmax_aggs)
+ {
+ MinMaxAggInfo *mminfo = (MinMaxAggInfo *) lfirst(lc);
+
+ if (mminfo->aggfnoid == aggref->aggfnoid &&
+ equal(mminfo->target, curTarget->expr))
+ return (Node *) copyObject(mminfo->param);
+ }
+ }
+ /* If no match, just fall through to process it normally */
+ }
+ if (IsA(node, CurrentOfExpr))
+ {
+ CurrentOfExpr *cexpr = (CurrentOfExpr *) copyObject(node);
+
+ Assert(!IS_SPECIAL_VARNO(cexpr->cvarno));
+ cexpr->cvarno += context->rtoffset;
+ return (Node *) cexpr;
+ }
+ if (IsA(node, PlaceHolderVar))
+ {
+ /* At scan level, we should always just evaluate the contained expr */
+ PlaceHolderVar *phv = (PlaceHolderVar *) node;
+
+ return fix_scan_expr_mutator((Node *) phv->phexpr, context);
+ }
+ if (IsA(node, AlternativeSubPlan))
+ return fix_scan_expr_mutator(fix_alternative_subplan(context->root,
+ (AlternativeSubPlan *) node,
+ context->num_exec),
+ context);
+ fix_expr_common(context->root, node);
+ return expression_tree_mutator(node, fix_scan_expr_mutator,
+ (void *) context);
+}
+
+static bool
+fix_scan_expr_walker(Node *node, fix_scan_expr_context *context)
+{
+ if (node == NULL)
+ return false;
+ Assert(!(IsA(node, Var) && ((Var *) node)->varno == ROWID_VAR));
+ Assert(!IsA(node, PlaceHolderVar));
+ Assert(!IsA(node, AlternativeSubPlan));
+ fix_expr_common(context->root, node);
+ return expression_tree_walker(node, fix_scan_expr_walker,
+ (void *) context);
+}
+
+/*
+ * set_join_references
+ * Modify the target list and quals of a join node to reference its
+ * subplans, by setting the varnos to OUTER_VAR or INNER_VAR and setting
+ * attno values to the result domain number of either the corresponding
+ * outer or inner join tuple item. Also perform opcode lookup for these
+ * expressions, and add regclass OIDs to root->glob->relationOids.
+ */
+static void
+set_join_references(PlannerInfo *root, Join *join, int rtoffset)
+{
+ Plan *outer_plan = join->plan.lefttree;
+ Plan *inner_plan = join->plan.righttree;
+ indexed_tlist *outer_itlist;
+ indexed_tlist *inner_itlist;
+
+ outer_itlist = build_tlist_index(outer_plan->targetlist);
+ inner_itlist = build_tlist_index(inner_plan->targetlist);
+
+ /*
+ * First process the joinquals (including merge or hash clauses). These
+ * are logically below the join so they can always use all values
+ * available from the input tlists. It's okay to also handle
+ * NestLoopParams now, because those couldn't refer to nullable
+ * subexpressions.
+ */
+ join->joinqual = fix_join_expr(root,
+ join->joinqual,
+ outer_itlist,
+ inner_itlist,
+ (Index) 0,
+ rtoffset,
+ NUM_EXEC_QUAL((Plan *) join));
+
+ /* Now do join-type-specific stuff */
+ if (IsA(join, NestLoop))
+ {
+ NestLoop *nl = (NestLoop *) join;
+ ListCell *lc;
+
+ foreach(lc, nl->nestParams)
+ {
+ NestLoopParam *nlp = (NestLoopParam *) lfirst(lc);
+
+ nlp->paramval = (Var *) fix_upper_expr(root,
+ (Node *) nlp->paramval,
+ outer_itlist,
+ OUTER_VAR,
+ rtoffset,
+ NUM_EXEC_TLIST(outer_plan));
+ /* Check we replaced any PlaceHolderVar with simple Var */
+ if (!(IsA(nlp->paramval, Var) &&
+ nlp->paramval->varno == OUTER_VAR))
+ elog(ERROR, "NestLoopParam was not reduced to a simple Var");
+ }
+ }
+ else if (IsA(join, MergeJoin))
+ {
+ MergeJoin *mj = (MergeJoin *) join;
+
+ mj->mergeclauses = fix_join_expr(root,
+ mj->mergeclauses,
+ outer_itlist,
+ inner_itlist,
+ (Index) 0,
+ rtoffset,
+ NUM_EXEC_QUAL((Plan *) join));
+ }
+ else if (IsA(join, HashJoin))
+ {
+ HashJoin *hj = (HashJoin *) join;
+
+ hj->hashclauses = fix_join_expr(root,
+ hj->hashclauses,
+ outer_itlist,
+ inner_itlist,
+ (Index) 0,
+ rtoffset,
+ NUM_EXEC_QUAL((Plan *) join));
+
+ /*
+ * HashJoin's hashkeys are used to look for matching tuples from its
+ * outer plan (not the Hash node!) in the hashtable.
+ */
+ hj->hashkeys = (List *) fix_upper_expr(root,
+ (Node *) hj->hashkeys,
+ outer_itlist,
+ OUTER_VAR,
+ rtoffset,
+ NUM_EXEC_QUAL((Plan *) join));
+ }
+
+ /*
+ * Now we need to fix up the targetlist and qpqual, which are logically
+ * above the join. This means they should not re-use any input expression
+ * that was computed in the nullable side of an outer join. Vars and
+ * PlaceHolderVars are fine, so we can implement this restriction just by
+ * clearing has_non_vars in the indexed_tlist structs.
+ *
+ * XXX This is a grotty workaround for the fact that we don't clearly
+ * distinguish between a Var appearing below an outer join and the "same"
+ * Var appearing above it. If we did, we'd not need to hack the matching
+ * rules this way.
+ */
+ switch (join->jointype)
+ {
+ case JOIN_LEFT:
+ case JOIN_SEMI:
+ case JOIN_ANTI:
+ inner_itlist->has_non_vars = false;
+ break;
+ case JOIN_RIGHT:
+ outer_itlist->has_non_vars = false;
+ break;
+ case JOIN_FULL:
+ outer_itlist->has_non_vars = false;
+ inner_itlist->has_non_vars = false;
+ break;
+ default:
+ break;
+ }
+
+ join->plan.targetlist = fix_join_expr(root,
+ join->plan.targetlist,
+ outer_itlist,
+ inner_itlist,
+ (Index) 0,
+ rtoffset,
+ NUM_EXEC_TLIST((Plan *) join));
+ join->plan.qual = fix_join_expr(root,
+ join->plan.qual,
+ outer_itlist,
+ inner_itlist,
+ (Index) 0,
+ rtoffset,
+ NUM_EXEC_QUAL((Plan *) join));
+
+ pfree(outer_itlist);
+ pfree(inner_itlist);
+}
+
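+/*
+ * Illustrative sketch (editor's note, not part of the original file).
+ * Given a join qual such as t1.x = t2.y, the fix_join_expr() calls above
+ * rewrite each Var to point at a child tlist position:
+ *
+ *		Var(varno = t1's rtindex, varattno = attno of x)
+ *			becomes Var(OUTER_VAR, resno of x in the outer child's tlist)
+ *		Var(varno = t2's rtindex, varattno = attno of y)
+ *			becomes Var(INNER_VAR, resno of y in the inner child's tlist)
+ *
+ * so the executor can fetch join inputs by tuple-slot position instead of
+ * by range-table reference.
+ */
+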
+/*
+ * set_upper_references
+ * Update the targetlist and quals of an upper-level plan node
+ * to refer to the tuples returned by its lefttree subplan.
+ * Also perform opcode lookup for these expressions, and
+ * add regclass OIDs to root->glob->relationOids.
+ *
+ * This is used for single-input plan types like Agg, Group, Result.
+ *
+ * In most cases, we have to match up individual Vars in the tlist and
+ * qual expressions with elements of the subplan's tlist (which was
+ * generated by flattening these selfsame expressions, so it should have all
+ * the required variables). There is an important exception, however:
+ * depending on where we are in the plan tree, sort/group columns may have
+ * been pushed into the subplan tlist unflattened. If these values are also
+ * needed in the output then we want to reference the subplan tlist element
+ * rather than recomputing the expression.
+ */
+static void
+set_upper_references(PlannerInfo *root, Plan *plan, int rtoffset)
+{
+ Plan *subplan = plan->lefttree;
+ indexed_tlist *subplan_itlist;
+ List *output_targetlist;
+ ListCell *l;
+
+ subplan_itlist = build_tlist_index(subplan->targetlist);
+
+ output_targetlist = NIL;
+ foreach(l, plan->targetlist)
+ {
+ TargetEntry *tle = (TargetEntry *) lfirst(l);
+ Node *newexpr;
+
+ /* If it's a sort/group item, first try to match by sortref */
+ if (tle->ressortgroupref != 0)
+ {
+ newexpr = (Node *)
+ search_indexed_tlist_for_sortgroupref(tle->expr,
+ tle->ressortgroupref,
+ subplan_itlist,
+ OUTER_VAR);
+ if (!newexpr)
+ newexpr = fix_upper_expr(root,
+ (Node *) tle->expr,
+ subplan_itlist,
+ OUTER_VAR,
+ rtoffset,
+ NUM_EXEC_TLIST(plan));
+ }
+ else
+ newexpr = fix_upper_expr(root,
+ (Node *) tle->expr,
+ subplan_itlist,
+ OUTER_VAR,
+ rtoffset,
+ NUM_EXEC_TLIST(plan));
+ tle = flatCopyTargetEntry(tle);
+ tle->expr = (Expr *) newexpr;
+ output_targetlist = lappend(output_targetlist, tle);
+ }
+ plan->targetlist = output_targetlist;
+
+ plan->qual = (List *)
+ fix_upper_expr(root,
+ (Node *) plan->qual,
+ subplan_itlist,
+ OUTER_VAR,
+ rtoffset,
+ NUM_EXEC_QUAL(plan));
+
+ pfree(subplan_itlist);
+}
+
+/*
+ * set_param_references
+ *	  Initialize the initParam list in a Gather or GatherMerge node so that
+ *	  it contains references to all the params that need to be evaluated
+ *	  before execution of the node.  These are the initplan params that are
+ *	  being passed to the plan nodes below it.
+ */
+static void
+set_param_references(PlannerInfo *root, Plan *plan)
+{
+ Assert(IsA(plan, Gather) || IsA(plan, GatherMerge));
+
+ if (plan->lefttree->extParam)
+ {
+ PlannerInfo *proot;
+ Bitmapset *initSetParam = NULL;
+ ListCell *l;
+
+ for (proot = root; proot != NULL; proot = proot->parent_root)
+ {
+ foreach(l, proot->init_plans)
+ {
+ SubPlan *initsubplan = (SubPlan *) lfirst(l);
+ ListCell *l2;
+
+ foreach(l2, initsubplan->setParam)
+ {
+ initSetParam = bms_add_member(initSetParam, lfirst_int(l2));
+ }
+ }
+ }
+
+ /*
+ * Remember the list of all external initplan params that are used by
+		 * the children of the Gather or GatherMerge node.
+ */
+ if (IsA(plan, Gather))
+ ((Gather *) plan)->initParam =
+ bms_intersect(plan->lefttree->extParam, initSetParam);
+ else
+ ((GatherMerge *) plan)->initParam =
+ bms_intersect(plan->lefttree->extParam, initSetParam);
+ }
+}
+
+/*
+ * Recursively scan an expression tree and convert Aggrefs to the proper
+ * intermediate form for combining aggregates. This means (1) replacing each
+ * one's argument list with a single argument that is the original Aggref
+ * modified to show partial aggregation and (2) changing the upper Aggref to
+ * show combining aggregation.
+ *
+ * After this step, set_upper_references will replace the partial Aggrefs
+ * with Vars referencing the lower Agg plan node's outputs, so that the final
+ * form seen by the executor is a combining Aggref with a Var as input.
+ *
+ * It's rather messy to postpone this step until setrefs.c; ideally it'd be
+ * done in createplan.c. The difficulty is that once we modify the Aggref
+ * expressions, they will no longer be equal() to their original form and
+ * so cross-plan-node-level matches will fail. So this has to happen after
+ * the plan node above the Agg has resolved its subplan references.
+ */
+static Node *
+convert_combining_aggrefs(Node *node, void *context)
+{
+ if (node == NULL)
+ return NULL;
+ if (IsA(node, Aggref))
+ {
+ Aggref *orig_agg = (Aggref *) node;
+ Aggref *child_agg;
+ Aggref *parent_agg;
+
+ /* Assert we've not chosen to partial-ize any unsupported cases */
+ Assert(orig_agg->aggorder == NIL);
+ Assert(orig_agg->aggdistinct == NIL);
+
+ /*
+ * Since aggregate calls can't be nested, we needn't recurse into the
+ * arguments. But for safety, flat-copy the Aggref node itself rather
+ * than modifying it in-place.
+ */
+ child_agg = makeNode(Aggref);
+ memcpy(child_agg, orig_agg, sizeof(Aggref));
+
+ /*
+ * For the parent Aggref, we want to copy all the fields of the
+ * original aggregate *except* the args list, which we'll replace
+ * below, and the aggfilter expression, which should be applied only
+ * by the child not the parent. Rather than explicitly knowing about
+ * all the other fields here, we can momentarily modify child_agg to
+ * provide a suitable source for copyObject.
+ */
+ child_agg->args = NIL;
+ child_agg->aggfilter = NULL;
+ parent_agg = copyObject(child_agg);
+ child_agg->args = orig_agg->args;
+ child_agg->aggfilter = orig_agg->aggfilter;
+
+ /*
+ * Now, set up child_agg to represent the first phase of partial
+ * aggregation. For now, assume serialization is required.
+ */
+ mark_partial_aggref(child_agg, AGGSPLIT_INITIAL_SERIAL);
+
+ /*
+ * And set up parent_agg to represent the second phase.
+ */
+ parent_agg->args = list_make1(makeTargetEntry((Expr *) child_agg,
+ 1, NULL, false));
+ mark_partial_aggref(parent_agg, AGGSPLIT_FINAL_DESERIAL);
+
+ return (Node *) parent_agg;
+ }
+ return expression_tree_mutator(node, convert_combining_aggrefs,
+ (void *) context);
+}
+
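+/*
+ * Illustrative sketch (editor's note, not part of the original file).
+ * For an aggregate such as sum(x), the rewrite above turns
+ *
+ *		Aggref(sum, args = {x})
+ *
+ * into
+ *
+ *		Aggref(sum, AGGSPLIT_FINAL_DESERIAL,
+ *			   args = {Aggref(sum, AGGSPLIT_INITIAL_SERIAL, args = {x})})
+ *
+ * Once set_upper_references() has run, the inner (partial) Aggref is
+ * replaced by a Var referencing the lower Agg node's output, so the
+ * executor finally sees a combining Aggref whose sole input is that Var.
+ */
+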
+/*
+ * set_dummy_tlist_references
+ * Replace the targetlist of an upper-level plan node with a simple
+ * list of OUTER_VAR references to its child.
+ *
+ * This is used for plan types like Sort and Append that don't evaluate
+ * their targetlists. Although the executor doesn't care at all what's in
+ * the tlist, EXPLAIN needs it to be realistic.
+ *
+ * Note: we could almost use set_upper_references() here, but it fails for
+ * Append for lack of a lefttree subplan. Single-purpose code is faster
+ * anyway.
+ */
+static void
+set_dummy_tlist_references(Plan *plan, int rtoffset)
+{
+ List *output_targetlist;
+ ListCell *l;
+
+ output_targetlist = NIL;
+ foreach(l, plan->targetlist)
+ {
+ TargetEntry *tle = (TargetEntry *) lfirst(l);
+ Var *oldvar = (Var *) tle->expr;
+ Var *newvar;
+
+ /*
+ * As in search_indexed_tlist_for_non_var(), we prefer to keep Consts
+ * as Consts, not Vars referencing Consts. Here, there's no speed
+ * advantage to be had, but it makes EXPLAIN output look cleaner, and
+ * again it avoids confusing the executor.
+ */
+ if (IsA(oldvar, Const))
+ {
+ /* just reuse the existing TLE node */
+ output_targetlist = lappend(output_targetlist, tle);
+ continue;
+ }
+
+ newvar = makeVar(OUTER_VAR,
+ tle->resno,
+ exprType((Node *) oldvar),
+ exprTypmod((Node *) oldvar),
+ exprCollation((Node *) oldvar),
+ 0);
+ if (IsA(oldvar, Var) &&
+ oldvar->varnosyn > 0)
+ {
+ newvar->varnosyn = oldvar->varnosyn + rtoffset;
+ newvar->varattnosyn = oldvar->varattnosyn;
+ }
+ else
+ {
+ newvar->varnosyn = 0; /* wasn't ever a plain Var */
+ newvar->varattnosyn = 0;
+ }
+
+ tle = flatCopyTargetEntry(tle);
+ tle->expr = (Expr *) newvar;
+ output_targetlist = lappend(output_targetlist, tle);
+ }
+ plan->targetlist = output_targetlist;
+
+ /* We don't touch plan->qual here */
+}
+
+
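+/*
+ * Illustrative sketch (editor's note, not part of the original file).
+ * For a Sort node whose child emits columns (a, b), the dummy tlist built
+ * above is simply
+ *
+ *		{Var(OUTER_VAR, 1), Var(OUTER_VAR, 2)}
+ *
+ * with each Var copying the corresponding child column's type, typmod, and
+ * collation.  Consts in the original tlist are kept as Consts, which reads
+ * better in EXPLAIN and avoids surprising the executor.
+ */
+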
+/*
+ * build_tlist_index --- build an index data structure for a child tlist
+ *
+ * In most cases, subplan tlists will be "flat" tlists with only Vars,
+ * so we try to optimize that case by extracting information about Vars
+ * in advance. Matching a parent tlist to a child is still an O(N^2)
+ * operation, but at least with a much smaller constant factor than plain
+ * tlist_member() searches.
+ *
+ * The result of this function is an indexed_tlist struct to pass to
+ * search_indexed_tlist_for_var() or search_indexed_tlist_for_non_var().
+ * When done, the indexed_tlist may be freed with a single pfree().
+ */
+static indexed_tlist *
+build_tlist_index(List *tlist)
+{
+ indexed_tlist *itlist;
+ tlist_vinfo *vinfo;
+ ListCell *l;
+
+ /* Create data structure with enough slots for all tlist entries */
+ itlist = (indexed_tlist *)
+ palloc(offsetof(indexed_tlist, vars) +
+ list_length(tlist) * sizeof(tlist_vinfo));
+
+ itlist->tlist = tlist;
+ itlist->has_ph_vars = false;
+ itlist->has_non_vars = false;
+
+ /* Find the Vars and fill in the index array */
+ vinfo = itlist->vars;
+ foreach(l, tlist)
+ {
+ TargetEntry *tle = (TargetEntry *) lfirst(l);
+
+ if (tle->expr && IsA(tle->expr, Var))
+ {
+ Var *var = (Var *) tle->expr;
+
+ vinfo->varno = var->varno;
+ vinfo->varattno = var->varattno;
+ vinfo->resno = tle->resno;
+ vinfo++;
+ }
+ else if (tle->expr && IsA(tle->expr, PlaceHolderVar))
+ itlist->has_ph_vars = true;
+ else
+ itlist->has_non_vars = true;
+ }
+
+ itlist->num_vars = (vinfo - itlist->vars);
+
+ return itlist;
+}
+
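+/*
+ * Illustrative sketch (editor's note, not part of the original file).
+ * For a flat child tlist {Var(1,2), Var(1,5), Var(3,1)}, build_tlist_index()
+ * produces an indexed_tlist whose vars[] array is
+ *
+ *		{ {varno 1, varattno 2, resno 1},
+ *		  {varno 1, varattno 5, resno 2},
+ *		  {varno 3, varattno 1, resno 3} }
+ *
+ * with has_ph_vars and has_non_vars both false, so later lookups can scan
+ * this small array rather than run equal() against whole TargetEntrys.
+ */
+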
+/*
+ * build_tlist_index_other_vars --- build a restricted tlist index
+ *
+ * This is like build_tlist_index, but we only index tlist entries that
+ * are Vars belonging to some rel other than the one specified. We will set
+ * has_ph_vars (allowing PlaceHolderVars to be matched), but not has_non_vars
+ * (so nothing other than Vars and PlaceHolderVars can be matched).
+ */
+static indexed_tlist *
+build_tlist_index_other_vars(List *tlist, int ignore_rel)
+{
+ indexed_tlist *itlist;
+ tlist_vinfo *vinfo;
+ ListCell *l;
+
+ /* Create data structure with enough slots for all tlist entries */
+ itlist = (indexed_tlist *)
+ palloc(offsetof(indexed_tlist, vars) +
+ list_length(tlist) * sizeof(tlist_vinfo));
+
+ itlist->tlist = tlist;
+ itlist->has_ph_vars = false;
+ itlist->has_non_vars = false;
+
+ /* Find the desired Vars and fill in the index array */
+ vinfo = itlist->vars;
+ foreach(l, tlist)
+ {
+ TargetEntry *tle = (TargetEntry *) lfirst(l);
+
+ if (tle->expr && IsA(tle->expr, Var))
+ {
+ Var *var = (Var *) tle->expr;
+
+ if (var->varno != ignore_rel)
+ {
+ vinfo->varno = var->varno;
+ vinfo->varattno = var->varattno;
+ vinfo->resno = tle->resno;
+ vinfo++;
+ }
+ }
+ else if (tle->expr && IsA(tle->expr, PlaceHolderVar))
+ itlist->has_ph_vars = true;
+ }
+
+ itlist->num_vars = (vinfo - itlist->vars);
+
+ return itlist;
+}
+
+/*
+ * search_indexed_tlist_for_var --- find a Var in an indexed tlist
+ *
+ * If a match is found, return a copy of the given Var with suitably
+ * modified varno/varattno (to wit, newvarno and the resno of the TLE entry).
+ * Also ensure that varnosyn is incremented by rtoffset.
+ * If no match, return NULL.
+ */
+static Var *
+search_indexed_tlist_for_var(Var *var, indexed_tlist *itlist,
+ int newvarno, int rtoffset)
+{
+ int varno = var->varno;
+ AttrNumber varattno = var->varattno;
+ tlist_vinfo *vinfo;
+ int i;
+
+ vinfo = itlist->vars;
+ i = itlist->num_vars;
+ while (i-- > 0)
+ {
+ if (vinfo->varno == varno && vinfo->varattno == varattno)
+ {
+ /* Found a match */
+ Var *newvar = copyVar(var);
+
+ newvar->varno = newvarno;
+ newvar->varattno = vinfo->resno;
+ if (newvar->varnosyn > 0)
+ newvar->varnosyn += rtoffset;
+ return newvar;
+ }
+ vinfo++;
+ }
+ return NULL; /* no match */
+}
+
+/*
+ * search_indexed_tlist_for_non_var --- find a non-Var in an indexed tlist
+ *
+ * If a match is found, return a Var constructed to reference the tlist item.
+ * If no match, return NULL.
+ *
+ * NOTE: it is a waste of time to call this unless itlist->has_ph_vars or
+ * itlist->has_non_vars. Furthermore, set_join_references() relies on being
+ * able to prevent matching of non-Vars by clearing itlist->has_non_vars,
+ * so there's a correctness reason not to call it unless that's set.
+ */
+static Var *
+search_indexed_tlist_for_non_var(Expr *node,
+ indexed_tlist *itlist, int newvarno)
+{
+ TargetEntry *tle;
+
+ /*
+ * If it's a simple Const, replacing it with a Var is silly, even if there
+ * happens to be an identical Const below; a Var is more expensive to
+ * execute than a Const. What's more, replacing it could confuse some
+ * places in the executor that expect to see simple Consts for, eg,
+ * dropped columns.
+ */
+ if (IsA(node, Const))
+ return NULL;
+
+ tle = tlist_member(node, itlist->tlist);
+ if (tle)
+ {
+ /* Found a matching subplan output expression */
+ Var *newvar;
+
+ newvar = makeVarFromTargetEntry(newvarno, tle);
+ newvar->varnosyn = 0; /* wasn't ever a plain Var */
+ newvar->varattnosyn = 0;
+ return newvar;
+ }
+ return NULL; /* no match */
+}
+
+/*
+ * search_indexed_tlist_for_sortgroupref --- find a sort/group expression
+ *
+ * If a match is found, return a Var constructed to reference the tlist item.
+ * If no match, return NULL.
+ *
+ * This is needed to ensure that we select the right subplan TLE in cases
+ * where there are multiple textually-equal()-but-volatile sort expressions.
+ * And it's also faster than search_indexed_tlist_for_non_var.
+ */
+static Var *
+search_indexed_tlist_for_sortgroupref(Expr *node,
+ Index sortgroupref,
+ indexed_tlist *itlist,
+ int newvarno)
+{
+ ListCell *lc;
+
+ foreach(lc, itlist->tlist)
+ {
+ TargetEntry *tle = (TargetEntry *) lfirst(lc);
+
+ /* The equal() check should be redundant, but let's be paranoid */
+ if (tle->ressortgroupref == sortgroupref &&
+ equal(node, tle->expr))
+ {
+ /* Found a matching subplan output expression */
+ Var *newvar;
+
+ newvar = makeVarFromTargetEntry(newvarno, tle);
+ newvar->varnosyn = 0; /* wasn't ever a plain Var */
+ newvar->varattnosyn = 0;
+ return newvar;
+ }
+ }
+ return NULL; /* no match */
+}
+
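+/*
+ * Illustrative sketch (editor's note, not part of the original file).
+ * Consider ORDER BY random(), random(): the two sort expressions are
+ * equal() yet volatile, so matching by expression alone could map both
+ * upper-level references onto the same subplan column.  Matching on
+ * ressortgroupref first, as above, keeps each reference tied to its own
+ * subplan tlist entry.
+ */
+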
+/*
+ * fix_join_expr
+ * Create a new set of targetlist entries or join qual clauses by
+ * changing the varno/varattno values of variables in the clauses
+ * to reference target list values from the outer and inner join
+ * relation target lists. Also perform opcode lookup and add
+ * regclass OIDs to root->glob->relationOids.
+ *
+ * This is used in four different scenarios:
+ * 1) a normal join clause, where all the Vars in the clause *must* be
+ * replaced by OUTER_VAR or INNER_VAR references. In this case
+ * acceptable_rel should be zero so that any failure to match a Var will be
+ * reported as an error.
+ * 2) RETURNING clauses, which may contain both Vars of the target relation
+ * and Vars of other relations. In this case we want to replace the
+ * other-relation Vars by OUTER_VAR references, while leaving target Vars
+ * alone. Thus inner_itlist = NULL and acceptable_rel = the ID of the
+ * target relation should be passed.
+ * 3) ON CONFLICT UPDATE SET/WHERE clauses. Here references to EXCLUDED are
+ * to be replaced with INNER_VAR references, while leaving target Vars (the
+ * to-be-updated relation) alone. Correspondingly inner_itlist is to be
+ * EXCLUDED elements, outer_itlist = NULL and acceptable_rel the target
+ * relation.
+ * 4) MERGE. In this case, references to the source relation are to be
+ * replaced with INNER_VAR references, leaving Vars of the target
+ * relation (the to-be-modified relation) alone. So inner_itlist is to be
+ * the source relation elements, outer_itlist = NULL and acceptable_rel
+ * the target relation.
+ *
+ * 'clauses' is the targetlist or list of join clauses
+ * 'outer_itlist' is the indexed target list of the outer join relation,
+ * or NULL
+ * 'inner_itlist' is the indexed target list of the inner join relation,
+ * or NULL
+ * 'acceptable_rel' is either zero or the rangetable index of a relation
+ * whose Vars may appear in the clause without provoking an error
+ * 'rtoffset': how much to increment varnos by
+ * 'num_exec': estimated number of executions of expression
+ *
+ * Returns the new expression tree. The original clause structure is
+ * not modified.
+ */
+static List *
+fix_join_expr(PlannerInfo *root,
+ List *clauses,
+ indexed_tlist *outer_itlist,
+ indexed_tlist *inner_itlist,
+ Index acceptable_rel,
+ int rtoffset,
+ double num_exec)
+{
+ fix_join_expr_context context;
+
+ context.root = root;
+ context.outer_itlist = outer_itlist;
+ context.inner_itlist = inner_itlist;
+ context.acceptable_rel = acceptable_rel;
+ context.rtoffset = rtoffset;
+ context.num_exec = num_exec;
+ return (List *) fix_join_expr_mutator((Node *) clauses, &context);
+}
+
+static Node *
+fix_join_expr_mutator(Node *node, fix_join_expr_context *context)
+{
+ Var *newvar;
+
+ if (node == NULL)
+ return NULL;
+ if (IsA(node, Var))
+ {
+ Var *var = (Var *) node;
+
+ /* Look for the var in the input tlists, first in the outer */
+ if (context->outer_itlist)
+ {
+ newvar = search_indexed_tlist_for_var(var,
+ context->outer_itlist,
+ OUTER_VAR,
+ context->rtoffset);
+ if (newvar)
+ return (Node *) newvar;
+ }
+
+ /* then in the inner. */
+ if (context->inner_itlist)
+ {
+ newvar = search_indexed_tlist_for_var(var,
+ context->inner_itlist,
+ INNER_VAR,
+ context->rtoffset);
+ if (newvar)
+ return (Node *) newvar;
+ }
+
+ /* If it's for acceptable_rel, adjust and return it */
+ if (var->varno == context->acceptable_rel)
+ {
+ var = copyVar(var);
+ var->varno += context->rtoffset;
+ if (var->varnosyn > 0)
+ var->varnosyn += context->rtoffset;
+ return (Node *) var;
+ }
+
+ /* No referent found for Var */
+ elog(ERROR, "variable not found in subplan target lists");
+ }
+ if (IsA(node, PlaceHolderVar))
+ {
+ PlaceHolderVar *phv = (PlaceHolderVar *) node;
+
+ /* See if the PlaceHolderVar has bubbled up from a lower plan node */
+ if (context->outer_itlist && context->outer_itlist->has_ph_vars)
+ {
+ newvar = search_indexed_tlist_for_non_var((Expr *) phv,
+ context->outer_itlist,
+ OUTER_VAR);
+ if (newvar)
+ return (Node *) newvar;
+ }
+ if (context->inner_itlist && context->inner_itlist->has_ph_vars)
+ {
+ newvar = search_indexed_tlist_for_non_var((Expr *) phv,
+ context->inner_itlist,
+ INNER_VAR);
+ if (newvar)
+ return (Node *) newvar;
+ }
+
+ /* If not supplied by input plans, evaluate the contained expr */
+ return fix_join_expr_mutator((Node *) phv->phexpr, context);
+ }
+ /* Try matching more complex expressions too, if tlists have any */
+ if (context->outer_itlist && context->outer_itlist->has_non_vars)
+ {
+ newvar = search_indexed_tlist_for_non_var((Expr *) node,
+ context->outer_itlist,
+ OUTER_VAR);
+ if (newvar)
+ return (Node *) newvar;
+ }
+ if (context->inner_itlist && context->inner_itlist->has_non_vars)
+ {
+ newvar = search_indexed_tlist_for_non_var((Expr *) node,
+ context->inner_itlist,
+ INNER_VAR);
+ if (newvar)
+ return (Node *) newvar;
+ }
+ /* Special cases (apply only AFTER failing to match to lower tlist) */
+ if (IsA(node, Param))
+ return fix_param_node(context->root, (Param *) node);
+ if (IsA(node, AlternativeSubPlan))
+ return fix_join_expr_mutator(fix_alternative_subplan(context->root,
+ (AlternativeSubPlan *) node,
+ context->num_exec),
+ context);
+ fix_expr_common(context->root, node);
+ return expression_tree_mutator(node,
+ fix_join_expr_mutator,
+ (void *) context);
+}
+
+/*
+ * fix_upper_expr
+ * Modifies an expression tree so that all Var nodes reference outputs
+ * of a subplan. Also looks for Aggref nodes that should be replaced
+ * by initplan output Params. Also performs opcode lookup, and adds
+ * regclass OIDs to root->glob->relationOids.
+ *
+ * This is used to fix up target and qual expressions of non-join upper-level
+ * plan nodes, as well as index-only scan nodes.
+ *
+ * An error is raised if no matching var can be found in the subplan tlist
+ * --- so this routine should only be applied to nodes whose subplans'
+ * targetlists were generated by flattening the expressions used in the
+ * parent node.
+ *
+ * If itlist->has_non_vars is true, then we try to match whole subexpressions
+ * against elements of the subplan tlist, so that we can avoid recomputing
+ * expressions that were already computed by the subplan. (This is relatively
+ * expensive, so we don't want to try it in the common case where the
+ * subplan tlist is just a flattened list of Vars.)
+ *
+ * 'node': the tree to be fixed (a target item or qual)
+ * 'subplan_itlist': indexed target list for subplan (or index)
+ * 'newvarno': varno to use for Vars referencing tlist elements
+ * 'rtoffset': how much to increment varnos by
+ * 'num_exec': estimated number of executions of expression
+ *
+ * The resulting tree is a copy of the original in which all Var nodes have
+ * varno = newvarno, varattno = resno of corresponding targetlist element.
+ * The original tree is not modified.
+ */
+static Node *
+fix_upper_expr(PlannerInfo *root,
+ Node *node,
+ indexed_tlist *subplan_itlist,
+ int newvarno,
+ int rtoffset,
+ double num_exec)
+{
+ fix_upper_expr_context context;
+
+ context.root = root;
+ context.subplan_itlist = subplan_itlist;
+ context.newvarno = newvarno;
+ context.rtoffset = rtoffset;
+ context.num_exec = num_exec;
+ return fix_upper_expr_mutator(node, &context);
+}
+
+static Node *
+fix_upper_expr_mutator(Node *node, fix_upper_expr_context *context)
+{
+ Var *newvar;
+
+ if (node == NULL)
+ return NULL;
+ if (IsA(node, Var))
+ {
+ Var *var = (Var *) node;
+
+ newvar = search_indexed_tlist_for_var(var,
+ context->subplan_itlist,
+ context->newvarno,
+ context->rtoffset);
+ if (!newvar)
+ elog(ERROR, "variable not found in subplan target list");
+ return (Node *) newvar;
+ }
+ if (IsA(node, PlaceHolderVar))
+ {
+ PlaceHolderVar *phv = (PlaceHolderVar *) node;
+
+ /* See if the PlaceHolderVar has bubbled up from a lower plan node */
+ if (context->subplan_itlist->has_ph_vars)
+ {
+ newvar = search_indexed_tlist_for_non_var((Expr *) phv,
+ context->subplan_itlist,
+ context->newvarno);
+ if (newvar)
+ return (Node *) newvar;
+ }
+ /* If not supplied by input plan, evaluate the contained expr */
+ return fix_upper_expr_mutator((Node *) phv->phexpr, context);
+ }
+ /* Try matching more complex expressions too, if tlist has any */
+ if (context->subplan_itlist->has_non_vars)
+ {
+ newvar = search_indexed_tlist_for_non_var((Expr *) node,
+ context->subplan_itlist,
+ context->newvarno);
+ if (newvar)
+ return (Node *) newvar;
+ }
+ /* Special cases (apply only AFTER failing to match to lower tlist) */
+ if (IsA(node, Param))
+ return fix_param_node(context->root, (Param *) node);
+ if (IsA(node, Aggref))
+ {
+ Aggref *aggref = (Aggref *) node;
+
+ /* See if the Aggref should be replaced by a Param */
+ if (context->root->minmax_aggs != NIL &&
+ list_length(aggref->args) == 1)
+ {
+ TargetEntry *curTarget = (TargetEntry *) linitial(aggref->args);
+ ListCell *lc;
+
+ foreach(lc, context->root->minmax_aggs)
+ {
+ MinMaxAggInfo *mminfo = (MinMaxAggInfo *) lfirst(lc);
+
+ if (mminfo->aggfnoid == aggref->aggfnoid &&
+ equal(mminfo->target, curTarget->expr))
+ return (Node *) copyObject(mminfo->param);
+ }
+ }
+ /* If no match, just fall through to process it normally */
+ }
+ if (IsA(node, AlternativeSubPlan))
+ return fix_upper_expr_mutator(fix_alternative_subplan(context->root,
+ (AlternativeSubPlan *) node,
+ context->num_exec),
+ context);
+ fix_expr_common(context->root, node);
+ return expression_tree_mutator(node,
+ fix_upper_expr_mutator,
+ (void *) context);
+}
+
+/*
+ * set_returning_clause_references
+ * Perform setrefs.c's work on a RETURNING targetlist
+ *
+ * If the query involves more than just the result table, we have to
+ * adjust any Vars that refer to other tables to reference junk tlist
+ * entries in the top subplan's targetlist. Vars referencing the result
+ * table should be left alone, however (the executor will evaluate them
+ * using the actual heap tuple, after firing triggers if any). In the
+ * adjusted RETURNING list, result-table Vars will have their original
+ * varno (plus rtoffset), but Vars for other rels will have varno OUTER_VAR.
+ *
+ * We also must perform opcode lookup and add regclass OIDs to
+ * root->glob->relationOids.
+ *
+ * 'rlist': the RETURNING targetlist to be fixed
+ * 'topplan': the top subplan node that will be just below the ModifyTable
+ * node (note it's not yet passed through set_plan_refs)
+ * 'resultRelation': RT index of the associated result relation
+ * 'rtoffset': how much to increment varnos by
+ *
+ * Note: the given 'root' is for the parent query level, not the 'topplan'.
+ * This does not matter currently since we only access the dependency-item
+ * lists in root->glob, but it would need some hacking if we wanted a root
+ * that actually matches the subplan.
+ *
+ * Note: resultRelation is not yet adjusted by rtoffset.
+ */
+static List *
+set_returning_clause_references(PlannerInfo *root,
+ List *rlist,
+ Plan *topplan,
+ Index resultRelation,
+ int rtoffset)
+{
+ indexed_tlist *itlist;
+
+ /*
+ * We can perform the desired Var fixup by abusing the fix_join_expr
+ * machinery that formerly handled inner indexscan fixup. We search the
+ * top plan's targetlist for Vars of non-result relations, and use
+ * fix_join_expr to convert RETURNING Vars into references to those tlist
+ * entries, while leaving result-rel Vars as-is.
+ *
+ * PlaceHolderVars will also be sought in the targetlist, but no
+ * more-complex expressions will be. Note that it is not possible for a
+ * PlaceHolderVar to refer to the result relation, since the result is
+ * never below an outer join. If that case could happen, we'd have to be
+ * prepared to pick apart the PlaceHolderVar and evaluate its contained
+ * expression instead.
+ */
+ itlist = build_tlist_index_other_vars(topplan->targetlist, resultRelation);
+
+ rlist = fix_join_expr(root,
+ rlist,
+ itlist,
+ NULL,
+ resultRelation,
+ rtoffset,
+ NUM_EXEC_TLIST(topplan));
+
+ pfree(itlist);
+
+ return rlist;
+}
+
+/*
+ * fix_windowagg_condition_expr_mutator
+ * Mutator function for replacing WindowFuncs with the corresponding Var
+ * in the targetlist which references that WindowFunc.
+ */
+static Node *
+fix_windowagg_condition_expr_mutator(Node *node,
+ fix_windowagg_cond_context *context)
+{
+ if (node == NULL)
+ return NULL;
+
+ if (IsA(node, WindowFunc))
+ {
+ Var *newvar;
+
+ newvar = search_indexed_tlist_for_non_var((Expr *) node,
+ context->subplan_itlist,
+ context->newvarno);
+ if (newvar)
+ return (Node *) newvar;
+ elog(ERROR, "WindowFunc not found in subplan target lists");
+ }
+
+ return expression_tree_mutator(node,
+ fix_windowagg_condition_expr_mutator,
+ (void *) context);
+}
+
+/*
+ * fix_windowagg_condition_expr
+ * Converts references in 'runcondition' so that any WindowFunc
+ * references are swapped out for a Var which references the matching
+ * WindowFunc in 'subplan_itlist'.
+ */
+static List *
+fix_windowagg_condition_expr(PlannerInfo *root,
+ List *runcondition,
+ indexed_tlist *subplan_itlist)
+{
+ fix_windowagg_cond_context context;
+
+ context.root = root;
+ context.subplan_itlist = subplan_itlist;
+ context.newvarno = 0;
+
+ return (List *) fix_windowagg_condition_expr_mutator((Node *) runcondition,
+ &context);
+}
+
+/*
+ * set_windowagg_runcondition_references
+ * Converts references in 'runcondition' so that any WindowFunc
+ * references are swapped out for a Var which references the matching
+ * WindowFunc in 'plan' targetlist.
+ */
+static List *
+set_windowagg_runcondition_references(PlannerInfo *root,
+ List *runcondition,
+ Plan *plan)
+{
+ List *newlist;
+ indexed_tlist *itlist;
+
+ itlist = build_tlist_index(plan->targetlist);
+
+ newlist = fix_windowagg_condition_expr(root, runcondition, itlist);
+
+ pfree(itlist);
+
+ return newlist;
+}
+
+/*****************************************************************************
+ * QUERY DEPENDENCY MANAGEMENT
+ *****************************************************************************/
+
+/*
+ * record_plan_function_dependency
+ * Mark the current plan as depending on a particular function.
+ *
+ * This is exported so that the function-inlining code can record a
+ * dependency on a function that it's removed from the plan tree.
+ */
+void
+record_plan_function_dependency(PlannerInfo *root, Oid funcid)
+{
+ /*
+ * For performance reasons, we don't bother to track built-in functions;
+ * we just assume they'll never change (or at least not in ways that'd
+ * invalidate plans using them). For this purpose we can consider a
+ * built-in function to be one with OID less than FirstUnpinnedObjectId.
+ * Note that the OID generator guarantees never to generate such an OID
+ * after startup, even at OID wraparound.
+ */
+ if (funcid >= (Oid) FirstUnpinnedObjectId)
+ {
+ PlanInvalItem *inval_item = makeNode(PlanInvalItem);
+
+ /*
+ * It would work to use any syscache on pg_proc, but the easiest is
+ * PROCOID since we already have the function's OID at hand. Note
+ * that plancache.c knows we use PROCOID.
+ */
+ inval_item->cacheId = PROCOID;
+ inval_item->hashValue = GetSysCacheHashValue1(PROCOID,
+ ObjectIdGetDatum(funcid));
+
+ root->glob->invalItems = lappend(root->glob->invalItems, inval_item);
+ }
+}
+
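+/*
+ * Illustrative sketch (editor's note, not part of the original file;
+ * "myfunc" is a hypothetical name).  If a plan calls a user-defined
+ * function myfunc(), record_plan_function_dependency(root, myfunc_oid)
+ * appends a PlanInvalItem carrying the PROCOID syscache hash value of that
+ * OID to root->glob->invalItems.  plancache.c later matches incoming
+ * syscache invalidation messages against these items and discards the
+ * cached plan when the function's pg_proc row changes, e.g. after CREATE
+ * OR REPLACE FUNCTION.
+ */
+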
+/*
+ * record_plan_type_dependency
+ * Mark the current plan as depending on a particular type.
+ *
+ * This is exported so that eval_const_expressions can record a
+ * dependency on a domain that it's removed a CoerceToDomain node for.
+ *
+ * We don't currently need to record dependencies on domains that the
+ * plan contains CoerceToDomain nodes for, though that might change in
+ * future. Hence, this isn't actually called in this module, though
+ * someday fix_expr_common might call it.
+ */
+void
+record_plan_type_dependency(PlannerInfo *root, Oid typid)
+{
+ /*
+ * As in record_plan_function_dependency, ignore the possibility that
+ * someone would change a built-in domain.
+ */
+ if (typid >= (Oid) FirstUnpinnedObjectId)
+ {
+ PlanInvalItem *inval_item = makeNode(PlanInvalItem);
+
+ /*
+ * It would work to use any syscache on pg_type, but the easiest is
+ * TYPEOID since we already have the type's OID at hand. Note that
+ * plancache.c knows we use TYPEOID.
+ */
+ inval_item->cacheId = TYPEOID;
+ inval_item->hashValue = GetSysCacheHashValue1(TYPEOID,
+ ObjectIdGetDatum(typid));
+
+ root->glob->invalItems = lappend(root->glob->invalItems, inval_item);
+ }
+}
+
+/*
+ * extract_query_dependencies
+ * Given a rewritten, but not yet planned, query or queries
+ * (i.e. a Query node or list of Query nodes), extract dependencies
+ * just as set_plan_references would do. Also detect whether any
+ * rewrite steps were affected by RLS.
+ *
+ * This is needed by plancache.c to handle invalidation of cached unplanned
+ * queries.
+ *
+ * Note: this does not go through eval_const_expressions, and hence doesn't
+ * reflect its additions of inlined functions and elided CoerceToDomain nodes
+ * to the invalItems list. This is obviously OK for functions, since we'll
+ * see them in the original query tree anyway. For domains, it's OK because
+ * we don't care about domains unless they get elided. That is, a plan might
+ * have domain dependencies that the query tree doesn't.
+ */
+void
+extract_query_dependencies(Node *query,
+ List **relationOids,
+ List **invalItems,
+ bool *hasRowSecurity)
+{
+ PlannerGlobal glob;
+ PlannerInfo root;
+
+ /* Make up dummy planner state so we can use this module's machinery */
+ MemSet(&glob, 0, sizeof(glob));
+ glob.type = T_PlannerGlobal;
+ glob.relationOids = NIL;
+ glob.invalItems = NIL;
+ /* Hack: we use glob.dependsOnRole to collect hasRowSecurity flags */
+ glob.dependsOnRole = false;
+
+ MemSet(&root, 0, sizeof(root));
+ root.type = T_PlannerInfo;
+ root.glob = &glob;
+
+ (void) extract_query_dependencies_walker(query, &root);
+
+ *relationOids = glob.relationOids;
+ *invalItems = glob.invalItems;
+ *hasRowSecurity = glob.dependsOnRole;
+}
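+
+/*
+ * A minimal usage sketch (hypothetical variable names, along the lines of
+ * what plancache.c does for a list of rewritten Query nodes):
+ *
+ *		List *relationOids = NIL;
+ *		List *invalItems = NIL;
+ *		bool  hasRowSecurity = false;
+ *
+ *		extract_query_dependencies((Node *) querytree_list,
+ *								   &relationOids, &invalItems,
+ *								   &hasRowSecurity);
+ *
+ * The output lists can then be stored alongside the cached query trees and
+ * checked against later invalidation events.
+ */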
+
+/*
+ * Tree walker for extract_query_dependencies.
+ *
+ * This is exported so that expression_planner_with_deps can call it on
+ * simple expressions (post-planning, not before planning, in that case).
+ * In that usage, glob.dependsOnRole isn't meaningful, but the relationOids
+ * and invalItems lists are added to as needed.
+ */
+bool
+extract_query_dependencies_walker(Node *node, PlannerInfo *context)
+{
+ if (node == NULL)
+ return false;
+ Assert(!IsA(node, PlaceHolderVar));
+ if (IsA(node, Query))
+ {
+ Query *query = (Query *) node;
+ ListCell *lc;
+
+ if (query->commandType == CMD_UTILITY)
+ {
+ /*
+ * This logic must handle any utility command for which parse
+ * analysis was nontrivial (cf. stmt_requires_parse_analysis).
+ *
+ * Notably, CALL requires its own processing.
+ */
+ if (IsA(query->utilityStmt, CallStmt))
+ {
+ CallStmt *callstmt = (CallStmt *) query->utilityStmt;
+
+ /* We need not examine funccall, just the transformed exprs */
+ (void) extract_query_dependencies_walker((Node *) callstmt->funcexpr,
+ context);
+ (void) extract_query_dependencies_walker((Node *) callstmt->outargs,
+ context);
+ return false;
+ }
+
+ /*
+ * Ignore other utility statements, except those (such as EXPLAIN)
+ * that contain a parsed-but-not-planned query. For those, we
+ * just need to transfer our attention to the contained query.
+ */
+ query = UtilityContainsQuery(query->utilityStmt);
+ if (query == NULL)
+ return false;
+ }
+
+ /* Remember if any Query has RLS quals applied by rewriter */
+ if (query->hasRowSecurity)
+ context->glob->dependsOnRole = true;
+
+ /* Collect relation OIDs in this Query's rtable */
+ foreach(lc, query->rtable)
+ {
+ RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc);
+
+ if (rte->rtekind == RTE_RELATION)
+ context->glob->relationOids =
+ lappend_oid(context->glob->relationOids, rte->relid);
+ else if (rte->rtekind == RTE_NAMEDTUPLESTORE &&
+ OidIsValid(rte->relid))
+ context->glob->relationOids =
+ lappend_oid(context->glob->relationOids,
+ rte->relid);
+ }
+
+ /* And recurse into the query's subexpressions */
+ return query_tree_walker(query, extract_query_dependencies_walker,
+ (void *) context, 0);
+ }
+ /* Extract function dependencies and check for regclass Consts */
+ fix_expr_common(context, node);
+ return expression_tree_walker(node, extract_query_dependencies_walker,
+ (void *) context);
+}
diff --git a/src/backend/optimizer/plan/subselect.c b/src/backend/optimizer/plan/subselect.c
new file mode 100644
index 0000000..a195788
--- /dev/null
+++ b/src/backend/optimizer/plan/subselect.c
@@ -0,0 +1,2999 @@
+/*-------------------------------------------------------------------------
+ *
+ * subselect.c
+ * Planning routines for subselects.
+ *
+ * This module deals with SubLinks and CTEs, but not subquery RTEs (i.e.,
+ * not sub-SELECT-in-FROM cases).
+ *
+ * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * IDENTIFICATION
+ * src/backend/optimizer/plan/subselect.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "access/htup_details.h"
+#include "catalog/pg_operator.h"
+#include "catalog/pg_type.h"
+#include "executor/executor.h"
+#include "miscadmin.h"
+#include "nodes/makefuncs.h"
+#include "nodes/nodeFuncs.h"
+#include "optimizer/clauses.h"
+#include "optimizer/cost.h"
+#include "optimizer/optimizer.h"
+#include "optimizer/paramassign.h"
+#include "optimizer/pathnode.h"
+#include "optimizer/planmain.h"
+#include "optimizer/planner.h"
+#include "optimizer/prep.h"
+#include "optimizer/subselect.h"
+#include "parser/parse_relation.h"
+#include "rewrite/rewriteManip.h"
+#include "utils/builtins.h"
+#include "utils/lsyscache.h"
+#include "utils/syscache.h"
+
+
+typedef struct convert_testexpr_context
+{
+ PlannerInfo *root;
+ List *subst_nodes; /* Nodes to substitute for Params */
+} convert_testexpr_context;
+
+typedef struct process_sublinks_context
+{
+ PlannerInfo *root;
+ bool isTopQual;
+} process_sublinks_context;
+
+typedef struct finalize_primnode_context
+{
+ PlannerInfo *root;
+ Bitmapset *paramids; /* Non-local PARAM_EXEC paramids found */
+} finalize_primnode_context;
+
+typedef struct inline_cte_walker_context
+{
+ const char *ctename; /* name and relative level of target CTE */
+ int levelsup;
+ Query *ctequery; /* query to substitute */
+} inline_cte_walker_context;
+
+
+static Node *build_subplan(PlannerInfo *root, Plan *plan, PlannerInfo *subroot,
+ List *plan_params,
+ SubLinkType subLinkType, int subLinkId,
+ Node *testexpr, List *testexpr_paramids,
+ bool unknownEqFalse);
+static List *generate_subquery_params(PlannerInfo *root, List *tlist,
+ List **paramIds);
+static List *generate_subquery_vars(PlannerInfo *root, List *tlist,
+ Index varno);
+static Node *convert_testexpr(PlannerInfo *root,
+ Node *testexpr,
+ List *subst_nodes);
+static Node *convert_testexpr_mutator(Node *node,
+ convert_testexpr_context *context);
+static bool subplan_is_hashable(Plan *plan);
+static bool subpath_is_hashable(Path *path);
+static bool testexpr_is_hashable(Node *testexpr, List *param_ids);
+static bool test_opexpr_is_hashable(OpExpr *testexpr, List *param_ids);
+static bool hash_ok_operator(OpExpr *expr);
+static bool contain_dml(Node *node);
+static bool contain_dml_walker(Node *node, void *context);
+static bool contain_outer_selfref(Node *node);
+static bool contain_outer_selfref_walker(Node *node, Index *depth);
+static void inline_cte(PlannerInfo *root, CommonTableExpr *cte);
+static bool inline_cte_walker(Node *node, inline_cte_walker_context *context);
+static bool simplify_EXISTS_query(PlannerInfo *root, Query *query);
+static Query *convert_EXISTS_to_ANY(PlannerInfo *root, Query *subselect,
+ Node **testexpr, List **paramIds);
+static Node *replace_correlation_vars_mutator(Node *node, PlannerInfo *root);
+static Node *process_sublinks_mutator(Node *node,
+ process_sublinks_context *context);
+static Bitmapset *finalize_plan(PlannerInfo *root,
+ Plan *plan,
+ int gather_param,
+ Bitmapset *valid_params,
+ Bitmapset *scan_params);
+static bool finalize_primnode(Node *node, finalize_primnode_context *context);
+static bool finalize_agg_primnode(Node *node, finalize_primnode_context *context);
+
+
+/*
+ * Get the datatype/typmod/collation of the first column of the plan's output.
+ *
+ * This information is stored for ARRAY_SUBLINK execution and for
+ * exprType()/exprTypmod()/exprCollation(), which have no way to get at the
+ * plan associated with a SubPlan node. We really only need the info for
+ * EXPR_SUBLINK and ARRAY_SUBLINK subplans, but for consistency we save it
+ * always.
+ */
+static void
+get_first_col_type(Plan *plan, Oid *coltype, int32 *coltypmod,
+ Oid *colcollation)
+{
+ /* In cases such as EXISTS, tlist might be empty; arbitrarily use VOID */
+ if (plan->targetlist)
+ {
+ TargetEntry *tent = linitial_node(TargetEntry, plan->targetlist);
+
+ if (!tent->resjunk)
+ {
+ *coltype = exprType((Node *) tent->expr);
+ *coltypmod = exprTypmod((Node *) tent->expr);
+ *colcollation = exprCollation((Node *) tent->expr);
+ return;
+ }
+ }
+ *coltype = VOIDOID;
+ *coltypmod = -1;
+ *colcollation = InvalidOid;
+}
+
+/*
+ * Convert a SubLink (as created by the parser) into a SubPlan.
+ *
+ * We are given the SubLink's contained query, type, ID, and testexpr. We are
+ * also told if this expression appears at top level of a WHERE/HAVING qual.
+ *
+ * Note: we assume that the testexpr has been AND/OR flattened (actually,
+ * it's been through eval_const_expressions), but not converted to
+ * implicit-AND form; and any SubLinks in it should already have been
+ * converted to SubPlans. The subquery is as yet untouched, however.
+ *
+ * The result is whatever we need to substitute in place of the SubLink node
+ * in the executable expression. If we're going to do the subplan as a
+ * regular subplan, this will be the constructed SubPlan node. If we're going
+ * to do the subplan as an InitPlan, the SubPlan node instead goes into
+ * root->init_plans, and what we return here is an expression tree
+ * representing the InitPlan's result: usually just a Param node representing
+ * a single scalar result, but possibly a row comparison tree containing
+ * multiple Param nodes, or for a MULTIEXPR subquery a simple NULL constant
+ * (since the real output Params are elsewhere in the tree, and the MULTIEXPR
+ * subquery itself is in a resjunk tlist entry whose value is uninteresting).
+ */
+static Node *
+make_subplan(PlannerInfo *root, Query *orig_subquery,
+ SubLinkType subLinkType, int subLinkId,
+ Node *testexpr, bool isTopQual)
+{
+ Query *subquery;
+ bool simple_exists = false;
+ double tuple_fraction;
+ PlannerInfo *subroot;
+ RelOptInfo *final_rel;
+ Path *best_path;
+ Plan *plan;
+ List *plan_params;
+ Node *result;
+
+ /*
+ * Copy the source Query node. This is a quick and dirty kluge to resolve
+ * the fact that the parser can generate trees with multiple links to the
+ * same sub-Query node, but the planner wants to scribble on the Query.
+ * Try to clean this up when we do querytree redesign...
+ */
+ subquery = copyObject(orig_subquery);
+
+ /*
+ * If it's an EXISTS subplan, we might be able to simplify it.
+ */
+ if (subLinkType == EXISTS_SUBLINK)
+ simple_exists = simplify_EXISTS_query(root, subquery);
+
+ /*
+ * For an EXISTS subplan, tell lower-level planner to expect that only the
+ * first tuple will be retrieved. For ALL and ANY subplans, we will be
+ * able to stop evaluating if the test condition fails or matches, so very
+ * often not all the tuples will be retrieved; for lack of a better idea,
+ * specify 50% retrieval. For EXPR, MULTIEXPR, and ROWCOMPARE subplans,
+ * use default behavior (we're only expecting one row out, anyway).
+ *
+ * NOTE: if you change these numbers, also change cost_subplan() in
+ * path/costsize.c.
+ *
+ * XXX If an ANY subplan is uncorrelated, build_subplan may decide to hash
+ * its output. In that case it would've been better to specify full
+ * retrieval. At present, however, we can only check hashability after
+ * we've made the subplan :-(. (Determining whether it'll fit in hash_mem
+ * is the really hard part.) Therefore, we don't want to be too
+ * optimistic about the percentage of tuples retrieved, for fear of
+ * selecting a plan that's bad for the materialization case.
+ */
+ if (subLinkType == EXISTS_SUBLINK)
+ tuple_fraction = 1.0; /* just like a LIMIT 1 */
+ else if (subLinkType == ALL_SUBLINK ||
+ subLinkType == ANY_SUBLINK)
+ tuple_fraction = 0.5; /* 50% */
+ else
+ tuple_fraction = 0.0; /* default behavior */
+
+ /* plan_params should not be in use in current query level */
+ Assert(root->plan_params == NIL);
+
+ /* Generate Paths for the subquery */
+ subroot = subquery_planner(root->glob, subquery,
+ root,
+ false, tuple_fraction);
+
+ /* Isolate the params needed by this specific subplan */
+ plan_params = root->plan_params;
+ root->plan_params = NIL;
+
+ /*
+ * Select best Path and turn it into a Plan. At least for now, there
+ * seems no reason to postpone doing that.
+ */
+ final_rel = fetch_upper_rel(subroot, UPPERREL_FINAL, NULL);
+ best_path = get_cheapest_fractional_path(final_rel, tuple_fraction);
+
+ plan = create_plan(subroot, best_path);
+
+ /* And convert to SubPlan or InitPlan format. */
+ result = build_subplan(root, plan, subroot, plan_params,
+ subLinkType, subLinkId,
+ testexpr, NIL, isTopQual);
+
+ /*
+ * If it's a correlated EXISTS with an unimportant targetlist, we might be
+ * able to transform it to the equivalent of an IN and then implement it
+ * by hashing. We don't have enough information yet to tell which way is
+ * likely to be better (it depends on the expected number of executions of
+ * the EXISTS qual, and we are much too early in planning the outer query
+ * to be able to guess that). So we generate both plans, if possible, and
+ * leave it to setrefs.c to decide which to use.
+ */
+ if (simple_exists && IsA(result, SubPlan))
+ {
+ Node *newtestexpr;
+ List *paramIds;
+
+ /* Make a second copy of the original subquery */
+ subquery = copyObject(orig_subquery);
+ /* and re-simplify */
+ simple_exists = simplify_EXISTS_query(root, subquery);
+ Assert(simple_exists);
+ /* See if it can be converted to an ANY query */
+ subquery = convert_EXISTS_to_ANY(root, subquery,
+ &newtestexpr, &paramIds);
+ if (subquery)
+ {
+ /* Generate Paths for the ANY subquery; we'll need all rows */
+ subroot = subquery_planner(root->glob, subquery,
+ root,
+ false, 0.0);
+
+ /* Isolate the params needed by this specific subplan */
+ plan_params = root->plan_params;
+ root->plan_params = NIL;
+
+ /* Select best Path */
+ final_rel = fetch_upper_rel(subroot, UPPERREL_FINAL, NULL);
+ best_path = final_rel->cheapest_total_path;
+
+ /* Now we can check if it'll fit in hash_mem */
+ if (subpath_is_hashable(best_path))
+ {
+ SubPlan *hashplan;
+ AlternativeSubPlan *asplan;
+
+ /* OK, finish planning the ANY subquery */
+ plan = create_plan(subroot, best_path);
+
+ /* ... and convert to SubPlan format */
+ hashplan = castNode(SubPlan,
+ build_subplan(root, plan, subroot,
+ plan_params,
+ ANY_SUBLINK, 0,
+ newtestexpr,
+ paramIds,
+ true));
+ /* Check we got what we expected */
+ Assert(hashplan->parParam == NIL);
+ Assert(hashplan->useHashTable);
+
+ /* Leave it to setrefs.c to decide which plan to use */
+ asplan = makeNode(AlternativeSubPlan);
+ asplan->subplans = list_make2(result, hashplan);
+ result = (Node *) asplan;
+ root->hasAlternativeSubPlans = true;
+ }
+ }
+ }
+
+ return result;
+}
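+
+/*
+ * For illustration (hypothetical queries), the sublink types handled above
+ * arise from constructs such as:
+ *
+ *		WHERE EXISTS (SELECT 1 FROM t WHERE t.x = outer_tab.x)	-- EXISTS_SUBLINK
+ *		WHERE y = ANY (SELECT z FROM t)							-- ANY_SUBLINK
+ *		WHERE y = (SELECT max(z) FROM t)						-- EXPR_SUBLINK
+ *		UPDATE u SET (a, b) = (SELECT p, q FROM t WHERE ...)	-- MULTIEXPR_SUBLINK
+ *
+ * with EXISTS planned for a single row, ALL/ANY for 50% retrieval, and the
+ * rest with default behavior, per the tuple_fraction logic above.
+ */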
+
+/*
+ * Build a SubPlan node given the raw inputs --- subroutine for make_subplan
+ *
+ * Returns either the SubPlan, or a replacement expression if we decide to
+ * make it an InitPlan, as explained in the comments for make_subplan.
+ */
+static Node *
+build_subplan(PlannerInfo *root, Plan *plan, PlannerInfo *subroot,
+ List *plan_params,
+ SubLinkType subLinkType, int subLinkId,
+ Node *testexpr, List *testexpr_paramids,
+ bool unknownEqFalse)
+{
+ Node *result;
+ SubPlan *splan;
+ bool isInitPlan;
+ ListCell *lc;
+
+ /*
+ * Initialize the SubPlan node. Note plan_id, plan_name, and cost fields
+ * are set further down.
+ */
+ splan = makeNode(SubPlan);
+ splan->subLinkType = subLinkType;
+ splan->testexpr = NULL;
+ splan->paramIds = NIL;
+ get_first_col_type(plan, &splan->firstColType, &splan->firstColTypmod,
+ &splan->firstColCollation);
+ splan->useHashTable = false;
+ splan->unknownEqFalse = unknownEqFalse;
+ splan->parallel_safe = plan->parallel_safe;
+ splan->setParam = NIL;
+ splan->parParam = NIL;
+ splan->args = NIL;
+
+ /*
+ * Make parParam and args lists of param IDs and expressions that current
+ * query level will pass to this child plan.
+ */
+ foreach(lc, plan_params)
+ {
+ PlannerParamItem *pitem = (PlannerParamItem *) lfirst(lc);
+ Node *arg = pitem->item;
+
+ /*
+ * The Var, PlaceHolderVar, Aggref or GroupingFunc has already been
+ * adjusted to have the correct varlevelsup, phlevelsup, or
+ * agglevelsup.
+ *
+ * If it's a PlaceHolderVar, Aggref or GroupingFunc, its arguments
+ * might contain SubLinks, which have not yet been processed (see the
+ * comments for SS_replace_correlation_vars). Do that now.
+ */
+ if (IsA(arg, PlaceHolderVar) ||
+ IsA(arg, Aggref) ||
+ IsA(arg, GroupingFunc))
+ arg = SS_process_sublinks(root, arg, false);
+
+ splan->parParam = lappend_int(splan->parParam, pitem->paramId);
+ splan->args = lappend(splan->args, arg);
+ }
+
+ /*
+ * Un-correlated or undirect correlated plans of EXISTS, EXPR, ARRAY,
+ * ROWCOMPARE, or MULTIEXPR types can be used as initPlans. For EXISTS,
+ * EXPR, or ARRAY, we return a Param referring to the result of evaluating
+ * the initPlan. For ROWCOMPARE, we must modify the testexpr tree to
+ * contain PARAM_EXEC Params instead of the PARAM_SUBLINK Params emitted
+ * by the parser, and then return that tree. For MULTIEXPR, we return a
+ * null constant: the resjunk targetlist item containing the SubLink does
+ * not need to return anything useful, since the referencing Params are
+ * elsewhere.
+ */
+ if (splan->parParam == NIL && subLinkType == EXISTS_SUBLINK)
+ {
+ Param *prm;
+
+ Assert(testexpr == NULL);
+ prm = generate_new_exec_param(root, BOOLOID, -1, InvalidOid);
+ splan->setParam = list_make1_int(prm->paramid);
+ isInitPlan = true;
+ result = (Node *) prm;
+ }
+ else if (splan->parParam == NIL && subLinkType == EXPR_SUBLINK)
+ {
+ TargetEntry *te = linitial(plan->targetlist);
+ Param *prm;
+
+ Assert(!te->resjunk);
+ Assert(testexpr == NULL);
+ prm = generate_new_exec_param(root,
+ exprType((Node *) te->expr),
+ exprTypmod((Node *) te->expr),
+ exprCollation((Node *) te->expr));
+ splan->setParam = list_make1_int(prm->paramid);
+ isInitPlan = true;
+ result = (Node *) prm;
+ }
+ else if (splan->parParam == NIL && subLinkType == ARRAY_SUBLINK)
+ {
+ TargetEntry *te = linitial(plan->targetlist);
+ Oid arraytype;
+ Param *prm;
+
+ Assert(!te->resjunk);
+ Assert(testexpr == NULL);
+ arraytype = get_promoted_array_type(exprType((Node *) te->expr));
+ if (!OidIsValid(arraytype))
+ elog(ERROR, "could not find array type for datatype %s",
+ format_type_be(exprType((Node *) te->expr)));
+ prm = generate_new_exec_param(root,
+ arraytype,
+ exprTypmod((Node *) te->expr),
+ exprCollation((Node *) te->expr));
+ splan->setParam = list_make1_int(prm->paramid);
+ isInitPlan = true;
+ result = (Node *) prm;
+ }
+ else if (splan->parParam == NIL && subLinkType == ROWCOMPARE_SUBLINK)
+ {
+ /* Adjust the Params */
+ List *params;
+
+ Assert(testexpr != NULL);
+ params = generate_subquery_params(root,
+ plan->targetlist,
+ &splan->paramIds);
+ result = convert_testexpr(root,
+ testexpr,
+ params);
+ splan->setParam = list_copy(splan->paramIds);
+ isInitPlan = true;
+
+ /*
+ * The executable expression is returned to become part of the outer
+ * plan's expression tree; it is not kept in the initplan node.
+ */
+ }
+ else if (subLinkType == MULTIEXPR_SUBLINK)
+ {
+ /*
+ * Whether it's an initplan or not, it needs to set a PARAM_EXEC Param
+ * for each output column.
+ */
+ List *params;
+
+ Assert(testexpr == NULL);
+ params = generate_subquery_params(root,
+ plan->targetlist,
+ &splan->setParam);
+
+ /*
+ * Save the list of replacement Params in the n'th cell of
+ * root->multiexpr_params; setrefs.c will use it to replace
+ * PARAM_MULTIEXPR Params.
+ */
+ while (list_length(root->multiexpr_params) < subLinkId)
+ root->multiexpr_params = lappend(root->multiexpr_params, NIL);
+ lc = list_nth_cell(root->multiexpr_params, subLinkId - 1);
+ Assert(lfirst(lc) == NIL);
+ lfirst(lc) = params;
+
+ /* It can be an initplan if there are no parParams. */
+ if (splan->parParam == NIL)
+ {
+ isInitPlan = true;
+ result = (Node *) makeNullConst(RECORDOID, -1, InvalidOid);
+ }
+ else
+ {
+ isInitPlan = false;
+ result = (Node *) splan;
+ }
+ }
+ else
+ {
+ /*
+ * Adjust the Params in the testexpr, unless caller already took care
+ * of it (as indicated by passing a list of Param IDs).
+ */
+ if (testexpr && testexpr_paramids == NIL)
+ {
+ List *params;
+
+ params = generate_subquery_params(root,
+ plan->targetlist,
+ &splan->paramIds);
+ splan->testexpr = convert_testexpr(root,
+ testexpr,
+ params);
+ }
+ else
+ {
+ splan->testexpr = testexpr;
+ splan->paramIds = testexpr_paramids;
+ }
+
+ /*
+ * We can't convert subplans of ALL_SUBLINK or ANY_SUBLINK types to
+ * initPlans, even when they are uncorrelated or undirect correlated,
+ * because we need to scan the output of the subplan for each outer
+ * tuple. But if it's a not-direct-correlated IN (= ANY) test, we
+ * might be able to use a hashtable to avoid comparing all the tuples.
+ */
+ if (subLinkType == ANY_SUBLINK &&
+ splan->parParam == NIL &&
+ subplan_is_hashable(plan) &&
+ testexpr_is_hashable(splan->testexpr, splan->paramIds))
+ splan->useHashTable = true;
+
+ /*
+ * Otherwise, we have the option to tack a Material node onto the top
+ * of the subplan, to reduce the cost of reading it repeatedly. This
+ * is pointless for a direct-correlated subplan, since we'd have to
+ * recompute its results each time anyway. For uncorrelated/undirect
+ * correlated subplans, we add Material unless the subplan's top plan
+ * node would materialize its output anyway. Also, if enable_material
+ * is false, then the user does not want us to materialize anything
+ * unnecessarily, so we don't.
+ */
+ else if (splan->parParam == NIL && enable_material &&
+ !ExecMaterializesOutput(nodeTag(plan)))
+ plan = materialize_finished_plan(plan);
+
+ result = (Node *) splan;
+ isInitPlan = false;
+ }
+
+ /*
+ * Add the subplan and its PlannerInfo to the global lists.
+ */
+ root->glob->subplans = lappend(root->glob->subplans, plan);
+ root->glob->subroots = lappend(root->glob->subroots, subroot);
+ splan->plan_id = list_length(root->glob->subplans);
+
+ if (isInitPlan)
+ root->init_plans = lappend(root->init_plans, splan);
+
+ /*
+ * A parameterless subplan (not initplan) should be prepared to handle
+ * REWIND efficiently. If it has direct parameters then there's no point
+ * since it'll be reset on each scan anyway; and if it's an initplan then
+ * there's no point since it won't get re-run without parameter changes
+ * anyway. The input of a hashed subplan doesn't need REWIND either.
+ */
+ if (splan->parParam == NIL && !isInitPlan && !splan->useHashTable)
+ root->glob->rewindPlanIDs = bms_add_member(root->glob->rewindPlanIDs,
+ splan->plan_id);
+
+ /* Label the subplan for EXPLAIN purposes */
+ splan->plan_name = palloc(32 + 12 * list_length(splan->setParam));
+ sprintf(splan->plan_name, "%s %d",
+ isInitPlan ? "InitPlan" : "SubPlan",
+ splan->plan_id);
+ if (splan->setParam)
+ {
+ char *ptr = splan->plan_name + strlen(splan->plan_name);
+
+ ptr += sprintf(ptr, " (returns ");
+ foreach(lc, splan->setParam)
+ {
+ ptr += sprintf(ptr, "$%d%s",
+ lfirst_int(lc),
+ lnext(splan->setParam, lc) ? "," : ")");
+ }
+ }
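+	/*
+	 * (For example, the label comes out as "InitPlan 1 (returns $0)" or
+	 * "SubPlan 2", as later shown in EXPLAIN output.)
+	 */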
+
+ /* Lastly, fill in the cost estimates for use later */
+ cost_subplan(root, splan, plan);
+
+ return result;
+}
+
+/*
+ * generate_subquery_params: build a list of Params representing the output
+ * columns of a sublink's sub-select, given the sub-select's targetlist.
+ *
+ * We also return an integer list of the paramids of the Params.
+ */
+static List *
+generate_subquery_params(PlannerInfo *root, List *tlist, List **paramIds)
+{
+ List *result;
+ List *ids;
+ ListCell *lc;
+
+ result = ids = NIL;
+ foreach(lc, tlist)
+ {
+ TargetEntry *tent = (TargetEntry *) lfirst(lc);
+ Param *param;
+
+ if (tent->resjunk)
+ continue;
+
+ param = generate_new_exec_param(root,
+ exprType((Node *) tent->expr),
+ exprTypmod((Node *) tent->expr),
+ exprCollation((Node *) tent->expr));
+ result = lappend(result, param);
+ ids = lappend_int(ids, param->paramid);
+ }
+
+ *paramIds = ids;
+ return result;
+}
+
+/*
+ * generate_subquery_vars: build a list of Vars representing the output
+ * columns of a sublink's sub-select, given the sub-select's targetlist.
+ * The Vars have the specified varno (RTE index).
+ */
+static List *
+generate_subquery_vars(PlannerInfo *root, List *tlist, Index varno)
+{
+ List *result;
+ ListCell *lc;
+
+ result = NIL;
+ foreach(lc, tlist)
+ {
+ TargetEntry *tent = (TargetEntry *) lfirst(lc);
+ Var *var;
+
+ if (tent->resjunk)
+ continue;
+
+ var = makeVarFromTargetEntry(varno, tent);
+ result = lappend(result, var);
+ }
+
+ return result;
+}
+
+/*
+ * convert_testexpr: convert the testexpr given by the parser into
+ * actually executable form. This entails replacing PARAM_SUBLINK Params
+ * with Params or Vars representing the results of the sub-select. The
+ * nodes to be substituted are passed in as the List result from
+ * generate_subquery_params or generate_subquery_vars.
+ */
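+/*
+ * Illustration (hypothetical query): for "x IN (SELECT y FROM t)" the parser
+ * builds the testexpr "x = $1", where $1 is a PARAM_SUBLINK Param referring
+ * to the sub-select's first output column.  Depending on the caller, that
+ * Param is replaced here either by a PARAM_EXEC Param (SubPlan/InitPlan
+ * case, via generate_subquery_params) or by a Var of the pulled-up subquery
+ * RTE (join case, via generate_subquery_vars).
+ */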
+static Node *
+convert_testexpr(PlannerInfo *root,
+ Node *testexpr,
+ List *subst_nodes)
+{
+ convert_testexpr_context context;
+
+ context.root = root;
+ context.subst_nodes = subst_nodes;
+ return convert_testexpr_mutator(testexpr, &context);
+}
+
+static Node *
+convert_testexpr_mutator(Node *node,
+ convert_testexpr_context *context)
+{
+ if (node == NULL)
+ return NULL;
+ if (IsA(node, Param))
+ {
+ Param *param = (Param *) node;
+
+ if (param->paramkind == PARAM_SUBLINK)
+ {
+ if (param->paramid <= 0 ||
+ param->paramid > list_length(context->subst_nodes))
+ elog(ERROR, "unexpected PARAM_SUBLINK ID: %d", param->paramid);
+
+ /*
+ * We copy the list item to avoid having doubly-linked
+ * substructure in the modified parse tree. This is probably
+ * unnecessary when it's a Param, but be safe.
+ */
+ return (Node *) copyObject(list_nth(context->subst_nodes,
+ param->paramid - 1));
+ }
+ }
+ if (IsA(node, SubLink))
+ {
+ /*
+ * If we come across a nested SubLink, it is neither necessary nor
+ * correct to recurse into it: any PARAM_SUBLINKs we might find inside
+ * belong to the inner SubLink not the outer. So just return it as-is.
+ *
+ * This reasoning depends on the assumption that nothing will pull
+ * subexpressions into or out of the testexpr field of a SubLink, at
+ * least not without replacing PARAM_SUBLINKs first. If we did want
+ * to do that we'd need to rethink the parser-output representation
+ * altogether, since currently PARAM_SUBLINKs are only unique per
+ * SubLink not globally across the query. The whole point of
+ * replacing them with Vars or PARAM_EXEC nodes is to make them
+ * globally unique before they escape from the SubLink's testexpr.
+ *
+ * Note: this can't happen when called during SS_process_sublinks,
+ * because that recursively processes inner SubLinks first. It can
+ * happen when called from convert_ANY_sublink_to_join, though.
+ */
+ return node;
+ }
+ return expression_tree_mutator(node,
+ convert_testexpr_mutator,
+ (void *) context);
+}
+
+/*
+ * subplan_is_hashable: can we implement an ANY subplan by hashing?
+ *
+ * This is not responsible for checking whether the combining testexpr
+ * is suitable for hashing. We only look at the subquery itself.
+ */
+static bool
+subplan_is_hashable(Plan *plan)
+{
+ double subquery_size;
+
+ /*
+ * The estimated size of the subquery result must fit in hash_mem. (Note:
+ * we use heap tuple overhead here even though the tuples will actually be
+ * stored as MinimalTuples; this provides some fudge factor for hashtable
+ * overhead.)
+ */
+ subquery_size = plan->plan_rows *
+ (MAXALIGN(plan->plan_width) + MAXALIGN(SizeofHeapTupleHeader));
+ if (subquery_size > get_hash_memory_limit())
+ return false;
+
+ return true;
+}
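+
+/*
+ * Worked example (assuming 8-byte MAXALIGN, where
+ * MAXALIGN(SizeofHeapTupleHeader) is 24): a subplan estimated at 1,000,000
+ * rows of width 32 is sized as 1000000 * (32 + 24) = 56,000,000 bytes, or
+ * about 53 MB, which is then compared against get_hash_memory_limit()
+ * (work_mem scaled by hash_mem_multiplier).
+ */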
+
+/*
+ * subpath_is_hashable: can we implement an ANY subplan by hashing?
+ *
+ * Identical to subplan_is_hashable, but work from a Path for the subplan.
+ */
+static bool
+subpath_is_hashable(Path *path)
+{
+ double subquery_size;
+
+ /*
+ * The estimated size of the subquery result must fit in hash_mem. (Note:
+ * we use heap tuple overhead here even though the tuples will actually be
+ * stored as MinimalTuples; this provides some fudge factor for hashtable
+ * overhead.)
+ */
+ subquery_size = path->rows *
+ (MAXALIGN(path->pathtarget->width) + MAXALIGN(SizeofHeapTupleHeader));
+ if (subquery_size > get_hash_memory_limit())
+ return false;
+
+ return true;
+}
+
+/*
+ * testexpr_is_hashable: is an ANY SubLink's test expression hashable?
+ *
+ * To identify LHS vs RHS of the hash expression, we must be given the
+ * list of output Param IDs of the SubLink's subquery.
+ */
+static bool
+testexpr_is_hashable(Node *testexpr, List *param_ids)
+{
+ /*
+ * The testexpr must be a single OpExpr, or an AND-clause containing only
+ * OpExprs, each of which satisfy test_opexpr_is_hashable().
+ */
+ if (testexpr && IsA(testexpr, OpExpr))
+ {
+ if (test_opexpr_is_hashable((OpExpr *) testexpr, param_ids))
+ return true;
+ }
+ else if (is_andclause(testexpr))
+ {
+ ListCell *l;
+
+ foreach(l, ((BoolExpr *) testexpr)->args)
+ {
+ Node *andarg = (Node *) lfirst(l);
+
+ if (!IsA(andarg, OpExpr))
+ return false;
+ if (!test_opexpr_is_hashable((OpExpr *) andarg, param_ids))
+ return false;
+ }
+ return true;
+ }
+
+ return false;
+}
+
+static bool
+test_opexpr_is_hashable(OpExpr *testexpr, List *param_ids)
+{
+ /*
+ * The combining operator must be hashable and strict. The need for
+ * hashability is obvious, since we want to use hashing. Without
+ * strictness, behavior in the presence of nulls is too unpredictable. We
+ * actually must assume even more than plain strictness: it can't yield
+ * NULL for non-null inputs, either (see nodeSubplan.c). However, hash
+ * indexes and hash joins assume that too.
+ */
+ if (!hash_ok_operator(testexpr))
+ return false;
+
+ /*
+ * The left and right inputs must belong to the outer and inner queries
+ * respectively; hence Params that will be supplied by the subquery must
+ * not appear in the LHS, and Vars of the outer query must not appear in
+ * the RHS. (Ordinarily, this must be true because of the way that the
+ * parser builds an ANY SubLink's testexpr ... but inlining of functions
+ * could have changed the expression's structure, so we have to check.
+ * Such cases do not occur often enough to be worth trying to optimize, so
+ * we don't worry about trying to commute the clause or anything like
+ * that; we just need to be sure not to build an invalid plan.)
+ */
+ if (list_length(testexpr->args) != 2)
+ return false;
+ if (contain_exec_param((Node *) linitial(testexpr->args), param_ids))
+ return false;
+ if (contain_var_clause((Node *) lsecond(testexpr->args)))
+ return false;
+ return true;
+}
+
+/*
+ * Check expression is hashable + strict
+ *
+ * We could use op_hashjoinable() and op_strict(), but do it like this to
+ * avoid a redundant cache lookup.
+ */
+static bool
+hash_ok_operator(OpExpr *expr)
+{
+ Oid opid = expr->opno;
+
+ /* quick out if not a binary operator */
+ if (list_length(expr->args) != 2)
+ return false;
+ if (opid == ARRAY_EQ_OP ||
+ opid == RECORD_EQ_OP)
+ {
+ /* these are strict, but must check input type to ensure hashable */
+ Node *leftarg = linitial(expr->args);
+
+ return op_hashjoinable(opid, exprType(leftarg));
+ }
+ else
+ {
+ /* else must look up the operator properties */
+ HeapTuple tup;
+ Form_pg_operator optup;
+
+ tup = SearchSysCache1(OPEROID, ObjectIdGetDatum(opid));
+ if (!HeapTupleIsValid(tup))
+ elog(ERROR, "cache lookup failed for operator %u", opid);
+ optup = (Form_pg_operator) GETSTRUCT(tup);
+ if (!optup->oprcanhash || !func_strict(optup->oprcode))
+ {
+ ReleaseSysCache(tup);
+ return false;
+ }
+ ReleaseSysCache(tup);
+ return true;
+ }
+}
+
+
+/*
+ * SS_process_ctes: process a query's WITH list
+ *
+ * Consider each CTE in the WITH list and either ignore it (if it's an
+ * unreferenced SELECT), "inline" it to create a regular sub-SELECT-in-FROM,
+ * or convert it to an initplan.
+ *
+ * A side effect is to fill in root->cte_plan_ids with a list that
+ * parallels root->parse->cteList and provides the subplan ID for
+ * each CTE's initplan, or a dummy ID (-1) if we didn't make an initplan.
+ */
+void
+SS_process_ctes(PlannerInfo *root)
+{
+ ListCell *lc;
+
+ Assert(root->cte_plan_ids == NIL);
+
+ foreach(lc, root->parse->cteList)
+ {
+ CommonTableExpr *cte = (CommonTableExpr *) lfirst(lc);
+ CmdType cmdType = ((Query *) cte->ctequery)->commandType;
+ Query *subquery;
+ PlannerInfo *subroot;
+ RelOptInfo *final_rel;
+ Path *best_path;
+ Plan *plan;
+ SubPlan *splan;
+ int paramid;
+
+ /*
+ * Ignore SELECT CTEs that are not actually referenced anywhere.
+ */
+ if (cte->cterefcount == 0 && cmdType == CMD_SELECT)
+ {
+ /* Make a dummy entry in cte_plan_ids */
+ root->cte_plan_ids = lappend_int(root->cte_plan_ids, -1);
+ continue;
+ }
+
+ /*
+ * Consider inlining the CTE (creating RTE_SUBQUERY RTE(s)) instead of
+ * implementing it as a separately-planned CTE.
+ *
+ * We cannot inline if any of these conditions hold:
+ *
+ * 1. The user said not to (the CTEMaterializeAlways option).
+ *
+ * 2. The CTE is recursive.
+ *
+ * 3. The CTE has side-effects; this includes either not being a plain
+ * SELECT, or containing volatile functions. Inlining might change
+ * the side-effects, which would be bad.
+ *
+ * 4. The CTE is multiply-referenced and contains a self-reference to
+ * a recursive CTE outside itself. Inlining would result in multiple
+ * recursive self-references, which we don't support.
+ *
+ * Otherwise, we have an option whether to inline or not. That should
+ * always be a win if there's just a single reference, but if the CTE
+ * is multiply-referenced then it's unclear: inlining adds duplicate
+ * computations, but the ability to absorb restrictions from the outer
+ * query level could outweigh that. We do not have nearly enough
+ * information at this point to tell whether that's true, so we let
+ * the user express a preference. Our default behavior is to inline
+ * only singly-referenced CTEs, but a CTE marked CTEMaterializeNever
+ * will be inlined even if multiply referenced.
+ *
+ * Note: we check for volatile functions last, because that's more
+ * expensive than the other tests needed.
+ */
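+		/*
+		 * For illustration (hypothetical SQL), the user-level knobs referred
+		 * to above are:
+		 *
+		 *		WITH w AS MATERIALIZED (...)		-- CTEMaterializeAlways, never inlined
+		 *		WITH w AS NOT MATERIALIZED (...)	-- CTEMaterializeNever, inlined when the
+		 *											-- other conditions permit
+		 *		WITH w AS (...)						-- CTEMaterializeDefault, inlined only
+		 *											-- if singly referenced
+		 */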
+ if ((cte->ctematerialized == CTEMaterializeNever ||
+ (cte->ctematerialized == CTEMaterializeDefault &&
+ cte->cterefcount == 1)) &&
+ !cte->cterecursive &&
+ cmdType == CMD_SELECT &&
+ !contain_dml(cte->ctequery) &&
+ (cte->cterefcount <= 1 ||
+ !contain_outer_selfref(cte->ctequery)) &&
+ !contain_volatile_functions(cte->ctequery))
+ {
+ inline_cte(root, cte);
+ /* Make a dummy entry in cte_plan_ids */
+ root->cte_plan_ids = lappend_int(root->cte_plan_ids, -1);
+ continue;
+ }
+
+ /*
+ * Copy the source Query node. Probably not necessary, but let's keep
+ * this similar to make_subplan.
+ */
+ subquery = (Query *) copyObject(cte->ctequery);
+
+ /* plan_params should not be in use in current query level */
+ Assert(root->plan_params == NIL);
+
+ /*
+ * Generate Paths for the CTE query. Always plan for full retrieval
+ * --- we don't have enough info to predict otherwise.
+ */
+ subroot = subquery_planner(root->glob, subquery,
+ root,
+ cte->cterecursive, 0.0);
+
+ /*
+ * Since the current query level doesn't yet contain any RTEs, it
+ * should not be possible for the CTE to have requested parameters of
+ * this level.
+ */
+ if (root->plan_params)
+ elog(ERROR, "unexpected outer reference in CTE query");
+
+ /*
+ * Select best Path and turn it into a Plan. At least for now, there
+ * seems no reason to postpone doing that.
+ */
+ final_rel = fetch_upper_rel(subroot, UPPERREL_FINAL, NULL);
+ best_path = final_rel->cheapest_total_path;
+
+ plan = create_plan(subroot, best_path);
+
+ /*
+ * Make a SubPlan node for it. This is just enough unlike
+ * build_subplan that we can't share code.
+ *
+ * Note plan_id, plan_name, and cost fields are set further down.
+ */
+ splan = makeNode(SubPlan);
+ splan->subLinkType = CTE_SUBLINK;
+ splan->testexpr = NULL;
+ splan->paramIds = NIL;
+ get_first_col_type(plan, &splan->firstColType, &splan->firstColTypmod,
+ &splan->firstColCollation);
+ splan->useHashTable = false;
+ splan->unknownEqFalse = false;
+
+ /*
+ * CTE scans are not considered for parallelism (cf
+ * set_rel_consider_parallel), and even if they were, initPlans aren't
+ * parallel-safe.
+ */
+ splan->parallel_safe = false;
+ splan->setParam = NIL;
+ splan->parParam = NIL;
+ splan->args = NIL;
+
+ /*
+ * The node can't have any inputs (since it's an initplan), so the
+ * parParam and args lists remain empty. (It could contain references
+ * to earlier CTEs' output param IDs, but CTE outputs are not
+ * propagated via the args list.)
+ */
+
+ /*
+ * Assign a param ID to represent the CTE's output. No ordinary
+ * "evaluation" of this param slot ever happens, but we use the param
+ * ID for setParam/chgParam signaling just as if the CTE plan were
+ * returning a simple scalar output. (Also, the executor abuses the
+ * ParamExecData slot for this param ID for communication among
+ * multiple CteScan nodes that might be scanning this CTE.)
+ */
+ paramid = assign_special_exec_param(root);
+ splan->setParam = list_make1_int(paramid);
+
+ /*
+ * Add the subplan and its PlannerInfo to the global lists.
+ */
+ root->glob->subplans = lappend(root->glob->subplans, plan);
+ root->glob->subroots = lappend(root->glob->subroots, subroot);
+ splan->plan_id = list_length(root->glob->subplans);
+
+ root->init_plans = lappend(root->init_plans, splan);
+
+ root->cte_plan_ids = lappend_int(root->cte_plan_ids, splan->plan_id);
+
+ /* Label the subplan for EXPLAIN purposes */
+ splan->plan_name = psprintf("CTE %s", cte->ctename);
+
+ /* Lastly, fill in the cost estimates for use later */
+ cost_subplan(root, splan, plan);
+ }
+}
+
+/*
+ * contain_dml: is any subquery not a plain SELECT?
+ *
+ * We reject SELECT FOR UPDATE/SHARE as well as INSERT etc.
+ */
+static bool
+contain_dml(Node *node)
+{
+ return contain_dml_walker(node, NULL);
+}
+
+static bool
+contain_dml_walker(Node *node, void *context)
+{
+ if (node == NULL)
+ return false;
+ if (IsA(node, Query))
+ {
+ Query *query = (Query *) node;
+
+ if (query->commandType != CMD_SELECT ||
+ query->rowMarks != NIL)
+ return true;
+
+ return query_tree_walker(query, contain_dml_walker, context, 0);
+ }
+ return expression_tree_walker(node, contain_dml_walker, context);
+}
+
+/*
+ * contain_outer_selfref: is there an external recursive self-reference?
+ */
+static bool
+contain_outer_selfref(Node *node)
+{
+ Index depth = 0;
+
+ /*
+ * We should be starting with a Query, so that depth will be 1 while
+ * examining its immediate contents.
+ */
+ Assert(IsA(node, Query));
+
+ return contain_outer_selfref_walker(node, &depth);
+}
+
+static bool
+contain_outer_selfref_walker(Node *node, Index *depth)
+{
+ if (node == NULL)
+ return false;
+ if (IsA(node, RangeTblEntry))
+ {
+ RangeTblEntry *rte = (RangeTblEntry *) node;
+
+ /*
+ * Check for a self-reference to a CTE that's above the Query that our
+ * search started at.
+ */
+ if (rte->rtekind == RTE_CTE &&
+ rte->self_reference &&
+ rte->ctelevelsup >= *depth)
+ return true;
+ return false; /* allow range_table_walker to continue */
+ }
+ if (IsA(node, Query))
+ {
+ /* Recurse into subquery, tracking nesting depth properly */
+ Query *query = (Query *) node;
+ bool result;
+
+ (*depth)++;
+
+ result = query_tree_walker(query, contain_outer_selfref_walker,
+ (void *) depth, QTW_EXAMINE_RTES_BEFORE);
+
+ (*depth)--;
+
+ return result;
+ }
+ return expression_tree_walker(node, contain_outer_selfref_walker,
+ (void *) depth);
+}
+
+/*
+ * inline_cte: convert RTE_CTE references to given CTE into RTE_SUBQUERYs
+ */
+static void
+inline_cte(PlannerInfo *root, CommonTableExpr *cte)
+{
+ struct inline_cte_walker_context context;
+
+ context.ctename = cte->ctename;
+ /* Start at levelsup = -1 because we'll immediately increment it */
+ context.levelsup = -1;
+ context.ctequery = castNode(Query, cte->ctequery);
+
+ (void) inline_cte_walker((Node *) root->parse, &context);
+}
+
+static bool
+inline_cte_walker(Node *node, inline_cte_walker_context *context)
+{
+ if (node == NULL)
+ return false;
+ if (IsA(node, Query))
+ {
+ Query *query = (Query *) node;
+
+ context->levelsup++;
+
+ /*
+ * Visit the query's RTE nodes after their contents; otherwise
+ * query_tree_walker would descend into the newly inlined CTE query,
+ * which we don't want.
+ */
+ (void) query_tree_walker(query, inline_cte_walker, context,
+ QTW_EXAMINE_RTES_AFTER);
+
+ context->levelsup--;
+
+ return false;
+ }
+ else if (IsA(node, RangeTblEntry))
+ {
+ RangeTblEntry *rte = (RangeTblEntry *) node;
+
+ if (rte->rtekind == RTE_CTE &&
+ strcmp(rte->ctename, context->ctename) == 0 &&
+ rte->ctelevelsup == context->levelsup)
+ {
+ /*
+ * Found a reference to replace. Generate a copy of the CTE query
+ * with appropriate level adjustment for outer references (e.g.,
+ * to other CTEs).
+ */
+ Query *newquery = copyObject(context->ctequery);
+
+ if (context->levelsup > 0)
+ IncrementVarSublevelsUp((Node *) newquery, context->levelsup, 1);
+
+ /*
+ * Convert the RTE_CTE RTE into a RTE_SUBQUERY.
+ *
+ * Historically, a FOR UPDATE clause has been treated as extending
+ * into views and subqueries, but not into CTEs. We preserve this
+ * distinction by not trying to push rowmarks into the new
+ * subquery.
+ */
+ rte->rtekind = RTE_SUBQUERY;
+ rte->subquery = newquery;
+ rte->security_barrier = false;
+
+ /* Zero out CTE-specific fields */
+ rte->ctename = NULL;
+ rte->ctelevelsup = 0;
+ rte->self_reference = false;
+ rte->coltypes = NIL;
+ rte->coltypmods = NIL;
+ rte->colcollations = NIL;
+ }
+
+ return false;
+ }
+
+ return expression_tree_walker(node, inline_cte_walker, context);
+}
+
+
+/*
+ * convert_ANY_sublink_to_join: try to convert an ANY SubLink to a join
+ *
+ * The caller has found an ANY SubLink at the top level of one of the query's
+ * qual clauses, but has not checked the properties of the SubLink further.
+ * Decide whether it is appropriate to process this SubLink in join style.
+ * If so, form a JoinExpr and return it. Return NULL if the SubLink cannot
+ * be converted to a join.
+ *
+ * The only non-obvious input parameter is available_rels: this is the set
+ * of query rels that can safely be referenced in the sublink expression.
+ * (We must restrict this to avoid changing the semantics when a sublink
+ * is present in an outer join's ON qual.) The conversion must fail if
+ * the converted qual would reference any but these parent-query relids.
+ *
+ * On success, the returned JoinExpr has larg = NULL and rarg = the jointree
+ * item representing the pulled-up subquery. The caller must set larg to
+ * represent the relation(s) on the lefthand side of the new join, and insert
+ * the JoinExpr into the upper query's jointree at an appropriate place
+ * (typically, where the lefthand relation(s) had been). Note that the
+ * passed-in SubLink must also be removed from its original position in the
+ * query quals, since the quals of the returned JoinExpr replace it.
+ * (Notionally, we replace the SubLink with a constant TRUE, then elide the
+ * redundant constant from the qual.)
+ *
+ * On success, the caller is also responsible for recursively applying
+ * pull_up_sublinks processing to the rarg and quals of the returned JoinExpr.
+ * (On failure, there is no need to do anything, since pull_up_sublinks will
+ * be applied when we recursively plan the sub-select.)
+ *
+ * Side effects of a successful conversion include adding the SubLink's
+ * subselect to the query's rangetable, so that it can be referenced in
+ * the JoinExpr's rarg.
+ */
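+/*
+ * Illustration (hypothetical query): given
+ *
+ *		SELECT ... FROM a WHERE a.x IN (SELECT b.y FROM b)
+ *
+ * the sub-select is added to the parent's range table under the alias
+ * "ANY_subquery", and the result is a JOIN_SEMI JoinExpr whose quals are
+ * roughly "a.x = ANY_subquery.y".  The caller then sets a as the JoinExpr's
+ * larg and puts the JoinExpr where a had been in the jointree.
+ */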
+JoinExpr *
+convert_ANY_sublink_to_join(PlannerInfo *root, SubLink *sublink,
+ Relids available_rels)
+{
+ JoinExpr *result;
+ Query *parse = root->parse;
+ Query *subselect = (Query *) sublink->subselect;
+ Relids upper_varnos;
+ int rtindex;
+ ParseNamespaceItem *nsitem;
+ RangeTblEntry *rte;
+ RangeTblRef *rtr;
+ List *subquery_vars;
+ Node *quals;
+ ParseState *pstate;
+
+ Assert(sublink->subLinkType == ANY_SUBLINK);
+
+ /*
+ * The sub-select must not refer to any Vars of the parent query. (Vars of
+ * higher levels should be okay, though.)
+ */
+ if (contain_vars_of_level((Node *) subselect, 1))
+ return NULL;
+
+ /*
+ * The test expression must contain some Vars of the parent query, else
+ * it's not gonna be a join. (Note that it won't have Vars referring to
+ * the subquery, rather Params.)
+ */
+ upper_varnos = pull_varnos(root, sublink->testexpr);
+ if (bms_is_empty(upper_varnos))
+ return NULL;
+
+ /*
+ * However, it can't refer to anything outside available_rels.
+ */
+ if (!bms_is_subset(upper_varnos, available_rels))
+ return NULL;
+
+ /*
+ * The combining operators and left-hand expressions mustn't be volatile.
+ */
+ if (contain_volatile_functions(sublink->testexpr))
+ return NULL;
+
+ /* Create a dummy ParseState for addRangeTableEntryForSubquery */
+ pstate = make_parsestate(NULL);
+
+ /*
+ * Okay, pull up the sub-select into upper range table.
+ *
+ * We rely here on the assumption that the outer query has no references
+ * to the inner (necessarily true, other than the Vars that we build
+ * below). Therefore this is a lot easier than what pull_up_subqueries has
+ * to go through.
+ */
+ nsitem = addRangeTableEntryForSubquery(pstate,
+ subselect,
+ makeAlias("ANY_subquery", NIL),
+ false,
+ false);
+ rte = nsitem->p_rte;
+ parse->rtable = lappend(parse->rtable, rte);
+ rtindex = list_length(parse->rtable);
+
+ /*
+ * Form a RangeTblRef for the pulled-up sub-select.
+ */
+ rtr = makeNode(RangeTblRef);
+ rtr->rtindex = rtindex;
+
+ /*
+ * Build a list of Vars representing the subselect outputs.
+ */
+ subquery_vars = generate_subquery_vars(root,
+ subselect->targetList,
+ rtindex);
+
+ /*
+ * Build the new join's qual expression, replacing Params with these Vars.
+ */
+ quals = convert_testexpr(root, sublink->testexpr, subquery_vars);
+
+ /*
+ * And finally, build the JoinExpr node.
+ */
+ result = makeNode(JoinExpr);
+ result->jointype = JOIN_SEMI;
+ result->isNatural = false;
+ result->larg = NULL; /* caller must fill this in */
+ result->rarg = (Node *) rtr;
+ result->usingClause = NIL;
+ result->join_using_alias = NULL;
+ result->quals = quals;
+ result->alias = NULL;
+ result->rtindex = 0; /* we don't need an RTE for it */
+
+ return result;
+}
+
+/*
+ * convert_EXISTS_sublink_to_join: try to convert an EXISTS SubLink to a join
+ *
+ * The API of this function is identical to convert_ANY_sublink_to_join's,
+ * except that we also support the case where the caller has found NOT EXISTS,
+ * so we need an additional input parameter "under_not".
+ */
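+/*
+ * Illustration (hypothetical query): given
+ *
+ *		SELECT ... FROM a WHERE NOT EXISTS
+ *			(SELECT 1 FROM b WHERE b.y = a.x)
+ *
+ * the sub-select's targetlist is discarded, its rtable is appended to the
+ * parent's, and the result is a JOIN_ANTI JoinExpr (JOIN_SEMI without NOT)
+ * whose quals are the pulled-up WHERE clause "b.y = a.x".
+ */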
+JoinExpr *
+convert_EXISTS_sublink_to_join(PlannerInfo *root, SubLink *sublink,
+ bool under_not, Relids available_rels)
+{
+ JoinExpr *result;
+ Query *parse = root->parse;
+ Query *subselect = (Query *) sublink->subselect;
+ Node *whereClause;
+ int rtoffset;
+ int varno;
+ Relids clause_varnos;
+ Relids upper_varnos;
+
+ Assert(sublink->subLinkType == EXISTS_SUBLINK);
+
+ /*
+ * Can't flatten if it contains WITH. (We could arrange to pull up the
+ * WITH into the parent query's cteList, but that risks changing the
+ * semantics, since a WITH ought to be executed once per associated query
+ * call.) Note that convert_ANY_sublink_to_join doesn't have to reject
+ * this case, since it just produces a subquery RTE that doesn't have to
+ * get flattened into the parent query.
+ */
+ if (subselect->cteList)
+ return NULL;
+
+ /*
+ * Copy the subquery so we can modify it safely (see comments in
+ * make_subplan).
+ */
+ subselect = copyObject(subselect);
+
+ /*
+ * See if the subquery can be simplified based on the knowledge that it's
+ * being used in EXISTS(). If we aren't able to get rid of its
+ * targetlist, we have to fail, because the pullup operation leaves us
+ * with no place to evaluate the targetlist.
+ */
+ if (!simplify_EXISTS_query(root, subselect))
+ return NULL;
+
+ /*
+ * Separate out the WHERE clause. (We could theoretically also remove
+ * top-level plain JOIN/ON clauses, but it's probably not worth the
+ * trouble.)
+ */
+ whereClause = subselect->jointree->quals;
+ subselect->jointree->quals = NULL;
+
+ /*
+ * The rest of the sub-select must not refer to any Vars of the parent
+ * query. (Vars of higher levels should be okay, though.)
+ */
+ if (contain_vars_of_level((Node *) subselect, 1))
+ return NULL;
+
+ /*
+ * On the other hand, the WHERE clause must contain some Vars of the
+ * parent query, else it's not gonna be a join.
+ */
+ if (!contain_vars_of_level(whereClause, 1))
+ return NULL;
+
+ /*
+ * We don't risk optimizing if the WHERE clause is volatile, either.
+ */
+ if (contain_volatile_functions(whereClause))
+ return NULL;
+
+ /*
+ * The subquery must have a nonempty jointree, but we can make it so.
+ */
+ replace_empty_jointree(subselect);
+
+ /*
+ * Prepare to pull up the sub-select into top range table.
+ *
+ * We rely here on the assumption that the outer query has no references
+ * to the inner (necessarily true). Therefore this is a lot easier than
+ * what pull_up_subqueries has to go through.
+ *
+ * In fact, it's even easier than what convert_ANY_sublink_to_join has to
+ * do. The machinations of simplify_EXISTS_query ensured that there is
+ * nothing interesting in the subquery except an rtable and jointree, and
+ * even the jointree FromExpr no longer has quals. So we can just append
+ * the rtable to our own and use the FromExpr in our jointree. But first,
+ * adjust all level-zero varnos in the subquery to account for the rtable
+ * merger.
+ */
+ rtoffset = list_length(parse->rtable);
+ OffsetVarNodes((Node *) subselect, rtoffset, 0);
+ OffsetVarNodes(whereClause, rtoffset, 0);
+
+ /*
+ * Upper-level vars in subquery will now be one level closer to their
+ * parent than before; in particular, anything that had been level 1
+ * becomes level zero.
+ */
+ IncrementVarSublevelsUp((Node *) subselect, -1, 1);
+ IncrementVarSublevelsUp(whereClause, -1, 1);
+
+ /*
+ * Now that the WHERE clause is adjusted to match the parent query
+ * environment, we can easily identify all the level-zero rels it uses.
+ * The ones <= rtoffset belong to the upper query; the ones > rtoffset do
+ * not.
+ */
+ clause_varnos = pull_varnos(root, whereClause);
+ upper_varnos = NULL;
+ while ((varno = bms_first_member(clause_varnos)) >= 0)
+ {
+ if (varno <= rtoffset)
+ upper_varnos = bms_add_member(upper_varnos, varno);
+ }
+ bms_free(clause_varnos);
+ Assert(!bms_is_empty(upper_varnos));
+
+ /*
+ * Now that we've got the set of upper-level varnos, we can make the last
+ * check: only available_rels can be referenced.
+ */
+ if (!bms_is_subset(upper_varnos, available_rels))
+ return NULL;
+
+ /* Now we can attach the modified subquery rtable to the parent */
+ parse->rtable = list_concat(parse->rtable, subselect->rtable);
+
+ /*
+ * And finally, build the JoinExpr node.
+ */
+ result = makeNode(JoinExpr);
+ result->jointype = under_not ? JOIN_ANTI : JOIN_SEMI;
+ result->isNatural = false;
+ result->larg = NULL; /* caller must fill this in */
+ /* flatten out the FromExpr node if it's useless */
+ if (list_length(subselect->jointree->fromlist) == 1)
+ result->rarg = (Node *) linitial(subselect->jointree->fromlist);
+ else
+ result->rarg = (Node *) subselect->jointree;
+ result->usingClause = NIL;
+ result->join_using_alias = NULL;
+ result->quals = whereClause;
+ result->alias = NULL;
+ result->rtindex = 0; /* we don't need an RTE for it */
+
+ return result;
+}
+
+/*
+ * simplify_EXISTS_query: remove any useless stuff in an EXISTS's subquery
+ *
+ * The only thing that matters about an EXISTS query is whether it returns
+ * zero or more than zero rows. Therefore, we can remove certain SQL features
+ * that won't affect that. The only part that is really likely to matter in
+ * typical usage is simplifying the targetlist: it's a common habit to write
+ * "SELECT * FROM" even though there is no need to evaluate any columns.
+ *
+ * Note: by suppressing the targetlist we could cause an observable behavioral
+ * change, namely that any errors that might occur in evaluating the tlist
+ * won't occur, nor will other side-effects of volatile functions. This seems
+ * unlikely to bother anyone in practice.
+ *
+ * Returns true if it was able to discard the targetlist, else false.
+ */
+static bool
+simplify_EXISTS_query(PlannerInfo *root, Query *query)
+{
+ /*
+ * We don't try to simplify at all if the query uses set operations,
+ * aggregates, grouping sets, SRFs, modifying CTEs, HAVING, OFFSET, or FOR
+ * UPDATE/SHARE; none of these seem likely in normal usage and their
+ * possible effects are complex. (Note: we could ignore an "OFFSET 0"
+ * clause, but that traditionally is used as an optimization fence, so we
+ * don't.)
+ */
+ if (query->commandType != CMD_SELECT ||
+ query->setOperations ||
+ query->hasAggs ||
+ query->groupingSets ||
+ query->hasWindowFuncs ||
+ query->hasTargetSRFs ||
+ query->hasModifyingCTE ||
+ query->havingQual ||
+ query->limitOffset ||
+ query->rowMarks)
+ return false;
+
+ /*
+ * LIMIT with a constant positive (or NULL) value doesn't affect the
+ * semantics of EXISTS, so let's ignore such clauses. This is worth doing
+ * because people accustomed to certain other DBMSes may be in the habit
+ * of writing EXISTS(SELECT ... LIMIT 1) as an optimization. If there's a
+ * LIMIT with anything else as argument, though, we can't simplify.
+ */
+ if (query->limitCount)
+ {
+ /*
+ * The LIMIT clause has not yet been through eval_const_expressions,
+ * so we have to apply that here. It might seem like this is a waste
+ * of cycles, since the only case plausibly worth worrying about is
+ * "LIMIT 1" ... but what we'll actually see is "LIMIT int8(1::int4)",
+ * so we have to fold constants or we're not going to recognize it.
+ */
+ Node *node = eval_const_expressions(root, query->limitCount);
+ Const *limit;
+
+ /* Might as well update the query if we simplified the clause. */
+ query->limitCount = node;
+
+ if (!IsA(node, Const))
+ return false;
+
+ limit = (Const *) node;
+ Assert(limit->consttype == INT8OID);
+ if (!limit->constisnull && DatumGetInt64(limit->constvalue) <= 0)
+ return false;
+
+ /* Whether or not the targetlist is safe, we can drop the LIMIT. */
+ query->limitCount = NULL;
+ }
+
+ /*
+ * Otherwise, we can throw away the targetlist, as well as any GROUP,
+ * WINDOW, DISTINCT, and ORDER BY clauses; none of those clauses will
+ * change a nonzero-rows result to zero rows or vice versa. (Furthermore,
+ * since our parsetree representation of these clauses depends on the
+ * targetlist, we'd better throw them away if we drop the targetlist.)
+ */
+ query->targetList = NIL;
+ query->groupClause = NIL;
+ query->windowClause = NIL;
+ query->distinctClause = NIL;
+ query->sortClause = NIL;
+ query->hasDistinctOn = false;
+
+ return true;
+}
+
+/*
+ * convert_EXISTS_to_ANY: try to convert EXISTS to a hashable ANY sublink
+ *
+ * The subselect is expected to be a fresh copy that we can munge up,
+ * and to have been successfully passed through simplify_EXISTS_query.
+ *
+ * On success, the modified subselect is returned, and we store a suitable
+ * upper-level test expression at *testexpr, plus a list of the subselect's
+ * output Params at *paramIds. (The test expression is already Param-ified
+ * and hence need not go through convert_testexpr, which is why we have to
+ * deal with the Param IDs specially.)
+ *
+ * On failure, returns NULL.
+ */
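+/*
+ * Illustration (hypothetical query): a correlated
+ *
+ *		EXISTS (SELECT 1 FROM b WHERE b.y = a.x)
+ *
+ * is recast so that the sub-select returns b.y while the upper-level test
+ * expression becomes "a.x = $n" (already in PARAM_EXEC form), i.e. the moral
+ * equivalent of "a.x = ANY (SELECT b.y FROM b)", which build_subplan can
+ * then consider implementing with a hash table.
+ */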
+static Query *
+convert_EXISTS_to_ANY(PlannerInfo *root, Query *subselect,
+ Node **testexpr, List **paramIds)
+{
+ Node *whereClause;
+ List *leftargs,
+ *rightargs,
+ *opids,
+ *opcollations,
+ *newWhere,
+ *tlist,
+ *testlist,
+ *paramids;
+ ListCell *lc,
+ *rc,
+ *oc,
+ *cc;
+ AttrNumber resno;
+
+ /*
+ * Query must not require a targetlist, since we have to insert a new one.
+ * Caller should have dealt with the case already.
+ */
+ Assert(subselect->targetList == NIL);
+
+ /*
+ * Separate out the WHERE clause. (We could theoretically also remove
+ * top-level plain JOIN/ON clauses, but it's probably not worth the
+ * trouble.)
+ */
+ whereClause = subselect->jointree->quals;
+ subselect->jointree->quals = NULL;
+
+ /*
+ * The rest of the sub-select must not refer to any Vars of the parent
+ * query. (Vars of higher levels should be okay, though.)
+ *
+ * Note: we need not check for Aggrefs separately because we know the
+ * sub-select is as yet unoptimized; any uplevel Aggref must therefore
+ * contain an uplevel Var reference. This is not the case below ...
+ */
+ if (contain_vars_of_level((Node *) subselect, 1))
+ return NULL;
+
+ /*
+ * We don't risk optimizing if the WHERE clause is volatile, either.
+ */
+ if (contain_volatile_functions(whereClause))
+ return NULL;
+
+ /*
+ * Clean up the WHERE clause by doing const-simplification etc on it.
+ * Aside from simplifying the processing we're about to do, this is
+ * important for being able to pull chunks of the WHERE clause up into the
+ * parent query. Since we are invoked partway through the parent's
+ * preprocess_expression() work, earlier steps of preprocess_expression()
+ * wouldn't get applied to the pulled-up stuff unless we do them here. For
+ * the parts of the WHERE clause that get put back into the child query,
+ * this work is partially duplicative, but it shouldn't hurt.
+ *
+ * Note: we do not run flatten_join_alias_vars. This is OK because any
+ * parent aliases were flattened already, and we're not going to pull any
+ * child Vars (of any description) into the parent.
+ *
+ * Note: passing the parent's root to eval_const_expressions is
+ * technically wrong, but we can get away with it since only the
+ * boundParams (if any) are used, and those would be the same in a
+ * subroot.
+ */
+ whereClause = eval_const_expressions(root, whereClause);
+ whereClause = (Node *) canonicalize_qual((Expr *) whereClause, false);
+ whereClause = (Node *) make_ands_implicit((Expr *) whereClause);
+
+ /*
+ * We now have a flattened implicit-AND list of clauses, which we try to
+ * break apart into "outervar = innervar" hash clauses. Anything that
+ * can't be broken apart just goes back into the newWhere list. Note that
+ * we aren't trying hard yet to ensure that we have only outer or only
+ * inner on each side; we'll check that if we get to the end.
+ */
+ leftargs = rightargs = opids = opcollations = newWhere = NIL;
+ foreach(lc, (List *) whereClause)
+ {
+ OpExpr *expr = (OpExpr *) lfirst(lc);
+
+ if (IsA(expr, OpExpr) &&
+ hash_ok_operator(expr))
+ {
+ Node *leftarg = (Node *) linitial(expr->args);
+ Node *rightarg = (Node *) lsecond(expr->args);
+
+ if (contain_vars_of_level(leftarg, 1))
+ {
+ leftargs = lappend(leftargs, leftarg);
+ rightargs = lappend(rightargs, rightarg);
+ opids = lappend_oid(opids, expr->opno);
+ opcollations = lappend_oid(opcollations, expr->inputcollid);
+ continue;
+ }
+ if (contain_vars_of_level(rightarg, 1))
+ {
+ /*
+ * We must commute the clause to put the outer var on the
+ * left, because the hashing code in nodeSubplan.c expects
+ * that. This probably shouldn't ever fail, since hashable
+ * operators ought to have commutators, but be paranoid.
+ */
+ expr->opno = get_commutator(expr->opno);
+ if (OidIsValid(expr->opno) && hash_ok_operator(expr))
+ {
+ leftargs = lappend(leftargs, rightarg);
+ rightargs = lappend(rightargs, leftarg);
+ opids = lappend_oid(opids, expr->opno);
+ opcollations = lappend_oid(opcollations, expr->inputcollid);
+ continue;
+ }
+ /* If no commutator, no chance to optimize the WHERE clause */
+ return NULL;
+ }
+ }
+ /* Couldn't handle it as a hash clause */
+ newWhere = lappend(newWhere, expr);
+ }
+
+ /*
+ * If we didn't find anything we could convert, fail.
+ */
+ if (leftargs == NIL)
+ return NULL;
+
+ /*
+ * There mustn't be any parent Vars or Aggs in the stuff that we intend to
+ * put back into the child query. Note: you might think we don't need to
+ * check for Aggs separately, because an uplevel Agg must contain an
+ * uplevel Var in its argument. But it is possible that the uplevel Var
+ * got optimized away by eval_const_expressions. Consider
+ *
+ * SUM(CASE WHEN false THEN uplevelvar ELSE 0 END)
+ */
+ if (contain_vars_of_level((Node *) newWhere, 1) ||
+ contain_vars_of_level((Node *) rightargs, 1))
+ return NULL;
+ if (root->parse->hasAggs &&
+ (contain_aggs_of_level((Node *) newWhere, 1) ||
+ contain_aggs_of_level((Node *) rightargs, 1)))
+ return NULL;
+
+ /*
+ * And there can't be any child Vars in the stuff we intend to pull up.
+ * (Note: we'd need to check for child Aggs too, except we know the child
+ * has no aggs at all because of simplify_EXISTS_query's check. The same
+ * goes for window functions.)
+ */
+ if (contain_vars_of_level((Node *) leftargs, 0))
+ return NULL;
+
+ /*
+ * Also reject sublinks in the stuff we intend to pull up. (It might be
+ * possible to support this, but doesn't seem worth the complication.)
+ */
+ if (contain_subplans((Node *) leftargs))
+ return NULL;
+
+ /*
+ * Okay, adjust the sublevelsup in the stuff we're pulling up.
+ */
+ IncrementVarSublevelsUp((Node *) leftargs, -1, 1);
+
+ /*
+ * Put back any child-level-only WHERE clauses.
+ */
+ if (newWhere)
+ subselect->jointree->quals = (Node *) make_ands_explicit(newWhere);
+
+ /*
+ * Build a new targetlist for the child that emits the expressions we
+ * need. Concurrently, build a testexpr for the parent using Params to
+ * reference the child outputs. (Since we generate Params directly here,
+ * there will be no need to convert the testexpr in build_subplan.)
+ */
+ tlist = testlist = paramids = NIL;
+ resno = 1;
+ forfour(lc, leftargs, rc, rightargs, oc, opids, cc, opcollations)
+ {
+ Node *leftarg = (Node *) lfirst(lc);
+ Node *rightarg = (Node *) lfirst(rc);
+ Oid opid = lfirst_oid(oc);
+ Oid opcollation = lfirst_oid(cc);
+ Param *param;
+
+ param = generate_new_exec_param(root,
+ exprType(rightarg),
+ exprTypmod(rightarg),
+ exprCollation(rightarg));
+ tlist = lappend(tlist,
+ makeTargetEntry((Expr *) rightarg,
+ resno++,
+ NULL,
+ false));
+ testlist = lappend(testlist,
+ make_opclause(opid, BOOLOID, false,
+ (Expr *) leftarg, (Expr *) param,
+ InvalidOid, opcollation));
+ paramids = lappend_int(paramids, param->paramid);
+ }
+
+ /* Put everything where it should go, and we're done */
+ subselect->targetList = tlist;
+ *testexpr = (Node *) make_ands_explicit(testlist);
+ *paramIds = paramids;
+
+ return subselect;
+}
+
+
+/*
+ * Replace correlation vars (uplevel vars) with Params.
+ *
+ * Uplevel PlaceHolderVars and aggregates are replaced, too.
+ *
+ * Note: it is critical that this runs immediately after SS_process_sublinks.
+ * Since we do not recurse into the arguments of uplevel PHVs and aggregates,
+ * they will get copied to the appropriate subplan args list in the parent
+ * query with uplevel vars not replaced by Params, but only adjusted in level
+ * (see replace_outer_placeholdervar and replace_outer_agg). That's exactly
+ * what we want for the vars of the parent level --- but if a PHV's or
+ * aggregate's argument contains any further-up variables, they have to be
+ * replaced with Params in their turn. That will happen when the parent level
+ * runs SS_replace_correlation_vars. Therefore it must do so after expanding
+ * its sublinks to subplans. And we don't want any steps in between, else
+ * those steps would never get applied to the argument expressions, either in
+ * the parent or the child level.
+ *
+ * Another fairly tricky thing going on here is the handling of SubLinks in
+ * the arguments of uplevel PHVs/aggregates. Those are not touched inside the
+ * intermediate query level, either. Instead, SS_process_sublinks recurses on
+ * them after copying the PHV or Aggref expression into the parent plan level
+ * (this is actually taken care of in build_subplan).
+ */
+Node *
+SS_replace_correlation_vars(PlannerInfo *root, Node *expr)
+{
+ /* No setup needed for tree walk, so away we go */
+ return replace_correlation_vars_mutator(expr, root);
+}
+
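+/*
+ * Tree-walk workhorse for SS_replace_correlation_vars.
+ */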
+static Node *
+replace_correlation_vars_mutator(Node *node, PlannerInfo *root)
+{
+ if (node == NULL)
+ return NULL;
+ if (IsA(node, Var))
+ {
+ if (((Var *) node)->varlevelsup > 0)
+ return (Node *) replace_outer_var(root, (Var *) node);
+ }
+ if (IsA(node, PlaceHolderVar))
+ {
+ if (((PlaceHolderVar *) node)->phlevelsup > 0)
+ return (Node *) replace_outer_placeholdervar(root,
+ (PlaceHolderVar *) node);
+ }
+ if (IsA(node, Aggref))
+ {
+ if (((Aggref *) node)->agglevelsup > 0)
+ return (Node *) replace_outer_agg(root, (Aggref *) node);
+ }
+ if (IsA(node, GroupingFunc))
+ {
+ if (((GroupingFunc *) node)->agglevelsup > 0)
+ return (Node *) replace_outer_grouping(root, (GroupingFunc *) node);
+ }
+ return expression_tree_mutator(node,
+ replace_correlation_vars_mutator,
+ (void *) root);
+}
+
+/*
+ * Expand SubLinks to SubPlans in the given expression.
+ *
+ * The isQual argument tells whether or not this expression is a WHERE/HAVING
+ * qualifier expression. If it is, any sublinks appearing at top level need
+ * not distinguish FALSE from UNKNOWN return values.
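+ * (For illustration: a sublink at the top of a WHERE clause filters out the
+ * row whether it yields FALSE or NULL, so the two results can be treated
+ * alike; the same sublink in a SELECT list would have to preserve the NULL.)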
+ */
+Node *
+SS_process_sublinks(PlannerInfo *root, Node *expr, bool isQual)
+{
+ process_sublinks_context context;
+
+ context.root = root;
+ context.isTopQual = isQual;
+ return process_sublinks_mutator(expr, &context);
+}
+
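+/*
+ * Tree-walk workhorse for SS_process_sublinks.
+ */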
+static Node *
+process_sublinks_mutator(Node *node, process_sublinks_context *context)
+{
+ process_sublinks_context locContext;
+
+ locContext.root = context->root;
+
+ if (node == NULL)
+ return NULL;
+ if (IsA(node, SubLink))
+ {
+ SubLink *sublink = (SubLink *) node;
+ Node *testexpr;
+
+ /*
+ * First, recursively process the lefthand-side expressions, if any.
+ * They're not top-level anymore.
+ */
+ locContext.isTopQual = false;
+ testexpr = process_sublinks_mutator(sublink->testexpr, &locContext);
+
+ /*
+ * Now build the SubPlan node and make the expr to return.
+ */
+ return make_subplan(context->root,
+ (Query *) sublink->subselect,
+ sublink->subLinkType,
+ sublink->subLinkId,
+ testexpr,
+ context->isTopQual);
+ }
+
+ /*
+ * Don't recurse into the arguments of an outer PHV, Aggref or
+ * GroupingFunc here. Any SubLinks in the arguments have to be dealt with
+ * at the outer query level; they'll be handled when build_subplan
+ * collects the PHV, Aggref or GroupingFunc into the arguments to be
+ * passed down to the current subplan.
+ */
+ if (IsA(node, PlaceHolderVar))
+ {
+ if (((PlaceHolderVar *) node)->phlevelsup > 0)
+ return node;
+ }
+ else if (IsA(node, Aggref))
+ {
+ if (((Aggref *) node)->agglevelsup > 0)
+ return node;
+ }
+ else if (IsA(node, GroupingFunc))
+ {
+ if (((GroupingFunc *) node)->agglevelsup > 0)
+ return node;
+ }
+
+ /*
+ * We should never see a SubPlan expression in the input (since this is
+ * the very routine that creates 'em to begin with). We shouldn't find
+ * ourselves invoked directly on a Query, either.
+ */
+ Assert(!IsA(node, SubPlan));
+ Assert(!IsA(node, AlternativeSubPlan));
+ Assert(!IsA(node, Query));
+
+ /*
+ * Because make_subplan() could return an AND or OR clause, we have to
+ * take steps to preserve AND/OR flatness of a qual. We assume the input
+ * has been AND/OR flattened and so we need no recursion here.
+ *
+ * (Due to the coding here, we will not get called on the List subnodes of
+ * an AND; and the input is *not* yet in implicit-AND format. So no check
+ * is needed for a bare List.)
+ *
+ * Anywhere within the top-level AND/OR clause structure, we can tell
+ * make_subplan() that NULL and FALSE are interchangeable. So isTopQual
+ * propagates down in both cases. (Note that this is unlike the meaning
+ * of "top level qual" used in most other places in Postgres.)
+ */
+ if (is_andclause(node))
+ {
+ List *newargs = NIL;
+ ListCell *l;
+
+ /* Still at qual top-level */
+ locContext.isTopQual = context->isTopQual;
+
+ foreach(l, ((BoolExpr *) node)->args)
+ {
+ Node *newarg;
+
+ newarg = process_sublinks_mutator(lfirst(l), &locContext);
+ if (is_andclause(newarg))
+ newargs = list_concat(newargs, ((BoolExpr *) newarg)->args);
+ else
+ newargs = lappend(newargs, newarg);
+ }
+ return (Node *) make_andclause(newargs);
+ }
+
+ if (is_orclause(node))
+ {
+ List *newargs = NIL;
+ ListCell *l;
+
+ /* Still at qual top-level */
+ locContext.isTopQual = context->isTopQual;
+
+ foreach(l, ((BoolExpr *) node)->args)
+ {
+ Node *newarg;
+
+ newarg = process_sublinks_mutator(lfirst(l), &locContext);
+ if (is_orclause(newarg))
+ newargs = list_concat(newargs, ((BoolExpr *) newarg)->args);
+ else
+ newargs = lappend(newargs, newarg);
+ }
+ return (Node *) make_orclause(newargs);
+ }
+
+ /*
+ * If we recurse down through anything other than an AND or OR node, we
+ * are definitely not at top qual level anymore.
+ */
+ locContext.isTopQual = false;
+
+ return expression_tree_mutator(node,
+ process_sublinks_mutator,
+ (void *) &locContext);
+}
+
+/*
+ * SS_identify_outer_params - identify the Params available from outer levels
+ *
+ * This must be run after SS_replace_correlation_vars and SS_process_sublinks
+ * processing is complete in a given query level as well as all of its
+ * descendant levels (which means it's most practical to do it at the end of
+ * processing the query level). We compute the set of paramIds that outer
+ * levels will make available to this level+descendants, and record it in
+ * root->outer_params for use while computing extParam/allParam sets in final
+ * plan cleanup. (We can't just compute it then, because the upper levels'
+ * plan_params lists are transient and will be gone by then.)
+ */
+void
+SS_identify_outer_params(PlannerInfo *root)
+{
+ Bitmapset *outer_params;
+ PlannerInfo *proot;
+ ListCell *l;
+
+ /*
+ * If no parameters have been assigned anywhere in the tree, we certainly
+ * don't need to do anything here.
+ */
+ if (root->glob->paramExecTypes == NIL)
+ return;
+
+ /*
+ * Scan all query levels above this one to see which parameters are due to
+ * be available from them, either because lower query levels have
+ * requested them (via plan_params) or because they will be available from
+ * initPlans of those levels.
+ */
+ outer_params = NULL;
+ for (proot = root->parent_root; proot != NULL; proot = proot->parent_root)
+ {
+ /* Include ordinary Var/PHV/Aggref/GroupingFunc params */
+ foreach(l, proot->plan_params)
+ {
+ PlannerParamItem *pitem = (PlannerParamItem *) lfirst(l);
+
+ outer_params = bms_add_member(outer_params, pitem->paramId);
+ }
+ /* Include any outputs of outer-level initPlans */
+ foreach(l, proot->init_plans)
+ {
+ SubPlan *initsubplan = (SubPlan *) lfirst(l);
+ ListCell *l2;
+
+ foreach(l2, initsubplan->setParam)
+ {
+ outer_params = bms_add_member(outer_params, lfirst_int(l2));
+ }
+ }
+ /* Include worktable ID, if a recursive query is being planned */
+ if (proot->wt_param_id >= 0)
+ outer_params = bms_add_member(outer_params, proot->wt_param_id);
+ }
+ root->outer_params = outer_params;
+}
+
+/*
+ * SS_charge_for_initplans - account for initplans in Path costs & parallelism
+ *
+ * If any initPlans have been created in the current query level, they will
+ * get attached to the Plan tree created from whichever Path we select from
+ * the given rel. Increment all that rel's Paths' costs to account for them,
+ * and make sure the paths get marked as parallel-unsafe, since we can't
+ * currently transmit initPlans to parallel workers.
+ *
+ * This is separate from SS_attach_initplans because we might conditionally
+ * create more initPlans during create_plan(), depending on which Path we
+ * select. However, Paths that would generate such initPlans are expected
+ * to have included their cost and parallel-safety effects already.
+ */
+void
+SS_charge_for_initplans(PlannerInfo *root, RelOptInfo *final_rel)
+{
+ Cost initplan_cost;
+ ListCell *lc;
+
+ /* Nothing to do if no initPlans */
+ if (root->init_plans == NIL)
+ return;
+
+ /*
+ * Compute the cost increment just once, since it will be the same for all
+ * Paths. We assume each initPlan gets run once during top plan startup.
+ * This is a conservative overestimate, since in fact an initPlan might be
+ * executed later than plan startup, or even not at all.
+ */
+ initplan_cost = 0;
+ foreach(lc, root->init_plans)
+ {
+ SubPlan *initsubplan = (SubPlan *) lfirst(lc);
+
+ initplan_cost += initsubplan->startup_cost + initsubplan->per_call_cost;
+ }
+
+ /*
+ * Now adjust the costs and parallel_safe flags.
+ */
+ foreach(lc, final_rel->pathlist)
+ {
+ Path *path = (Path *) lfirst(lc);
+
+ path->startup_cost += initplan_cost;
+ path->total_cost += initplan_cost;
+ path->parallel_safe = false;
+ }
+
+ /*
+ * Forget about any partial paths and clear consider_parallel, too;
+ * they're not usable if we attached an initPlan.
+ */
+ final_rel->partial_pathlist = NIL;
+ final_rel->consider_parallel = false;
+
+ /* We needn't do set_cheapest() here, caller will do it */
+}
+
+/*
+ * SS_attach_initplans - attach initplans to topmost plan node
+ *
+ * Attach any initplans created in the current query level to the specified
+ * plan node, which should normally be the topmost node for the query level.
+ * (In principle the initPlans could go in any node at or above where they're
+ * referenced; but there seems no reason to put them any lower than the
+ * topmost node, so we don't bother to track exactly where they came from.)
+ *
+ * We do not touch the plan node's cost or parallel_safe flag. The initplans
+ * must have been accounted for in SS_charge_for_initplans, or by any later
+ * code that adds initplans via SS_make_initplan_from_plan.
+ */
+void
+SS_attach_initplans(PlannerInfo *root, Plan *plan)
+{
+ plan->initPlan = root->init_plans;
+}
+
+/*
+ * SS_finalize_plan - do final parameter processing for a completed Plan.
+ *
+ * This recursively computes the extParam and allParam sets for every Plan
+ * node in the given plan tree. (Oh, and RangeTblFunction.funcparams too.)
+ *
+ * We assume that SS_finalize_plan has already been run on any initplans or
+ * subplans the plan tree could reference.
+ */
+void
+SS_finalize_plan(PlannerInfo *root, Plan *plan)
+{
+ /* No setup needed, just recurse through plan tree. */
+ (void) finalize_plan(root, plan, -1, root->outer_params, NULL);
+}
+
+/*
+ * Recursive processing of all nodes in the plan tree
+ *
+ * gather_param is the rescan_param of an ancestral Gather/GatherMerge,
+ * or -1 if there is none.
+ *
+ * valid_params is the set of param IDs supplied by outer plan levels
+ * that are valid to reference in this plan node or its children.
+ *
+ * scan_params is a set of param IDs to force scan plan nodes to reference.
+ * This is for EvalPlanQual support, and is always NULL at the top of the
+ * recursion.
+ *
+ * The return value is the computed allParam set for the given Plan node.
+ * This is just an internal notational convenience: we can add a child
+ * plan's allParams to the set of param IDs of interest to this level
+ * in the same statement that recurses to that child.
+ *
+ * Do not scribble on caller's values of valid_params or scan_params!
+ *
+ * Note: although we attempt to deal with initPlans anywhere in the tree, the
+ * logic is not really right. The problem is that a plan node might return an
+ * output Param of its initPlan as a targetlist item, in which case it's valid
+ * for the parent plan level to reference that same Param; the parent's usage
+ * will be converted into a Var referencing the child plan node by setrefs.c.
+ * But this function would see the parent's reference as out of scope and
+ * complain about it. For now, this does not matter because the planner only
+ * attaches initPlans to the topmost plan node in a query level, so the case
+ * doesn't arise. If we ever merge this processing into setrefs.c, maybe it
+ * can be handled more cleanly.
+ */
+static Bitmapset *
+finalize_plan(PlannerInfo *root, Plan *plan,
+ int gather_param,
+ Bitmapset *valid_params,
+ Bitmapset *scan_params)
+{
+ finalize_primnode_context context;
+ int locally_added_param;
+ Bitmapset *nestloop_params;
+ Bitmapset *initExtParam;
+ Bitmapset *initSetParam;
+ Bitmapset *child_params;
+ ListCell *l;
+
+ if (plan == NULL)
+ return NULL;
+
+ context.root = root;
+ context.paramids = NULL; /* initialize set to empty */
+ locally_added_param = -1; /* there isn't one */
+ nestloop_params = NULL; /* there aren't any */
+
+ /*
+ * Examine any initPlans to determine the set of external params they
+ * reference and the set of output params they supply. (We assume
+ * SS_finalize_plan was run on them already.)
+ */
+ initExtParam = initSetParam = NULL;
+ foreach(l, plan->initPlan)
+ {
+ SubPlan *initsubplan = (SubPlan *) lfirst(l);
+ Plan *initplan = planner_subplan_get_plan(root, initsubplan);
+ ListCell *l2;
+
+ initExtParam = bms_add_members(initExtParam, initplan->extParam);
+ foreach(l2, initsubplan->setParam)
+ {
+ initSetParam = bms_add_member(initSetParam, lfirst_int(l2));
+ }
+ }
+
+ /* Any setParams are validly referenceable in this node and children */
+ if (initSetParam)
+ valid_params = bms_union(valid_params, initSetParam);
+
+ /*
+ * When we call finalize_primnode, context.paramids sets are automatically
+ * merged together. But when recursing to self, we have to do it the hard
+ * way. We want the paramids set to include params in subplans as well as
+ * at this level.
+ */
+
+ /* Find params in targetlist and qual */
+ finalize_primnode((Node *) plan->targetlist, &context);
+ finalize_primnode((Node *) plan->qual, &context);
+
+ /*
+ * If it's a parallel-aware scan node, mark it as dependent on the parent
+ * Gather/GatherMerge's rescan Param.
+ */
+ if (plan->parallel_aware)
+ {
+ if (gather_param < 0)
+ elog(ERROR, "parallel-aware plan node is not below a Gather");
+ context.paramids =
+ bms_add_member(context.paramids, gather_param);
+ }
+
+ /* Check additional node-type-specific fields */
+ switch (nodeTag(plan))
+ {
+ case T_Result:
+ finalize_primnode(((Result *) plan)->resconstantqual,
+ &context);
+ break;
+
+ case T_SeqScan:
+ context.paramids = bms_add_members(context.paramids, scan_params);
+ break;
+
+ case T_SampleScan:
+ finalize_primnode((Node *) ((SampleScan *) plan)->tablesample,
+ &context);
+ context.paramids = bms_add_members(context.paramids, scan_params);
+ break;
+
+ case T_IndexScan:
+ finalize_primnode((Node *) ((IndexScan *) plan)->indexqual,
+ &context);
+ finalize_primnode((Node *) ((IndexScan *) plan)->indexorderby,
+ &context);
+
+ /*
+ * we need not look at indexqualorig, since it will have the same
+ * param references as indexqual. Likewise, we can ignore
+ * indexorderbyorig.
+ */
+ context.paramids = bms_add_members(context.paramids, scan_params);
+ break;
+
+ case T_IndexOnlyScan:
+ finalize_primnode((Node *) ((IndexOnlyScan *) plan)->indexqual,
+ &context);
+ finalize_primnode((Node *) ((IndexOnlyScan *) plan)->recheckqual,
+ &context);
+ finalize_primnode((Node *) ((IndexOnlyScan *) plan)->indexorderby,
+ &context);
+
+ /*
+ * we need not look at indextlist, since it cannot contain Params.
+ */
+ context.paramids = bms_add_members(context.paramids, scan_params);
+ break;
+
+ case T_BitmapIndexScan:
+ finalize_primnode((Node *) ((BitmapIndexScan *) plan)->indexqual,
+ &context);
+
+ /*
+ * we need not look at indexqualorig, since it will have the same
+ * param references as indexqual.
+ */
+ break;
+
+ case T_BitmapHeapScan:
+ finalize_primnode((Node *) ((BitmapHeapScan *) plan)->bitmapqualorig,
+ &context);
+ context.paramids = bms_add_members(context.paramids, scan_params);
+ break;
+
+ case T_TidScan:
+ finalize_primnode((Node *) ((TidScan *) plan)->tidquals,
+ &context);
+ context.paramids = bms_add_members(context.paramids, scan_params);
+ break;
+
+ case T_TidRangeScan:
+ finalize_primnode((Node *) ((TidRangeScan *) plan)->tidrangequals,
+ &context);
+ context.paramids = bms_add_members(context.paramids, scan_params);
+ break;
+
+ case T_SubqueryScan:
+ {
+ SubqueryScan *sscan = (SubqueryScan *) plan;
+ RelOptInfo *rel;
+ Bitmapset *subquery_params;
+
+ /* We must run finalize_plan on the subquery */
+ rel = find_base_rel(root, sscan->scan.scanrelid);
+ subquery_params = rel->subroot->outer_params;
+ if (gather_param >= 0)
+ subquery_params = bms_add_member(bms_copy(subquery_params),
+ gather_param);
+ finalize_plan(rel->subroot, sscan->subplan, gather_param,
+ subquery_params, NULL);
+
+ /* Now we can add its extParams to the parent's params */
+ context.paramids = bms_add_members(context.paramids,
+ sscan->subplan->extParam);
+ /* We need scan_params too, though */
+ context.paramids = bms_add_members(context.paramids,
+ scan_params);
+ }
+ break;
+
+ case T_FunctionScan:
+ {
+ FunctionScan *fscan = (FunctionScan *) plan;
+ ListCell *lc;
+
+ /*
+ * Call finalize_primnode independently on each function
+ * expression, so that we can record which params are
+ * referenced in each, in order to decide which need
+ * re-evaluating during rescan.
+ */
+ foreach(lc, fscan->functions)
+ {
+ RangeTblFunction *rtfunc = (RangeTblFunction *) lfirst(lc);
+ finalize_primnode_context funccontext;
+
+ funccontext = context;
+ funccontext.paramids = NULL;
+
+ finalize_primnode(rtfunc->funcexpr, &funccontext);
+
+ /* remember results for execution */
+ rtfunc->funcparams = funccontext.paramids;
+
+ /* add the function's params to the overall set */
+ context.paramids = bms_add_members(context.paramids,
+ funccontext.paramids);
+ }
+
+ context.paramids = bms_add_members(context.paramids,
+ scan_params);
+ }
+ break;
+
+ case T_TableFuncScan:
+ finalize_primnode((Node *) ((TableFuncScan *) plan)->tablefunc,
+ &context);
+ context.paramids = bms_add_members(context.paramids, scan_params);
+ break;
+
+ case T_ValuesScan:
+ finalize_primnode((Node *) ((ValuesScan *) plan)->values_lists,
+ &context);
+ context.paramids = bms_add_members(context.paramids, scan_params);
+ break;
+
+ case T_CteScan:
+ {
+ /*
+ * You might think we should add the node's cteParam to
+ * paramids, but we shouldn't because that param is just a
+ * linkage mechanism for multiple CteScan nodes for the same
+ * CTE; it is never used for changed-param signaling. What we
+ * have to do instead is to find the referenced CTE plan and
+ * incorporate its external paramids, so that the correct
+ * things will happen if the CTE references outer-level
+ * variables. See test cases for bug #4902. (We assume
+ * SS_finalize_plan was run on the CTE plan already.)
+ */
+ int plan_id = ((CteScan *) plan)->ctePlanId;
+ Plan *cteplan;
+
+ /* so, do this ... */
+ if (plan_id < 1 || plan_id > list_length(root->glob->subplans))
+ elog(ERROR, "could not find plan for CteScan referencing plan ID %d",
+ plan_id);
+ cteplan = (Plan *) list_nth(root->glob->subplans, plan_id - 1);
+ context.paramids =
+ bms_add_members(context.paramids, cteplan->extParam);
+
+#ifdef NOT_USED
+ /* ... but not this */
+ context.paramids =
+ bms_add_member(context.paramids,
+ ((CteScan *) plan)->cteParam);
+#endif
+
+ context.paramids = bms_add_members(context.paramids,
+ scan_params);
+ }
+ break;
+
+ case T_WorkTableScan:
+ context.paramids =
+ bms_add_member(context.paramids,
+ ((WorkTableScan *) plan)->wtParam);
+ context.paramids = bms_add_members(context.paramids, scan_params);
+ break;
+
+ case T_NamedTuplestoreScan:
+ context.paramids = bms_add_members(context.paramids, scan_params);
+ break;
+
+ case T_ForeignScan:
+ {
+ ForeignScan *fscan = (ForeignScan *) plan;
+
+ finalize_primnode((Node *) fscan->fdw_exprs,
+ &context);
+ finalize_primnode((Node *) fscan->fdw_recheck_quals,
+ &context);
+
+ /* We assume fdw_scan_tlist cannot contain Params */
+ context.paramids = bms_add_members(context.paramids,
+ scan_params);
+ }
+ break;
+
+ case T_CustomScan:
+ {
+ CustomScan *cscan = (CustomScan *) plan;
+ ListCell *lc;
+
+ finalize_primnode((Node *) cscan->custom_exprs,
+ &context);
+ /* We assume custom_scan_tlist cannot contain Params */
+ context.paramids =
+ bms_add_members(context.paramids, scan_params);
+
+ /* child nodes if any */
+ foreach(lc, cscan->custom_plans)
+ {
+ context.paramids =
+ bms_add_members(context.paramids,
+ finalize_plan(root,
+ (Plan *) lfirst(lc),
+ gather_param,
+ valid_params,
+ scan_params));
+ }
+ }
+ break;
+
+ case T_ModifyTable:
+ {
+ ModifyTable *mtplan = (ModifyTable *) plan;
+
+ /* Force descendant scan nodes to reference epqParam */
+ locally_added_param = mtplan->epqParam;
+ valid_params = bms_add_member(bms_copy(valid_params),
+ locally_added_param);
+ scan_params = bms_add_member(bms_copy(scan_params),
+ locally_added_param);
+ finalize_primnode((Node *) mtplan->returningLists,
+ &context);
+ finalize_primnode((Node *) mtplan->onConflictSet,
+ &context);
+ finalize_primnode((Node *) mtplan->onConflictWhere,
+ &context);
+ /* exclRelTlist contains only Vars, doesn't need examination */
+ }
+ break;
+
+ case T_Append:
+ {
+ ListCell *l;
+
+ foreach(l, ((Append *) plan)->appendplans)
+ {
+ context.paramids =
+ bms_add_members(context.paramids,
+ finalize_plan(root,
+ (Plan *) lfirst(l),
+ gather_param,
+ valid_params,
+ scan_params));
+ }
+ }
+ break;
+
+ case T_MergeAppend:
+ {
+ ListCell *l;
+
+ foreach(l, ((MergeAppend *) plan)->mergeplans)
+ {
+ context.paramids =
+ bms_add_members(context.paramids,
+ finalize_plan(root,
+ (Plan *) lfirst(l),
+ gather_param,
+ valid_params,
+ scan_params));
+ }
+ }
+ break;
+
+ case T_BitmapAnd:
+ {
+ ListCell *l;
+
+ foreach(l, ((BitmapAnd *) plan)->bitmapplans)
+ {
+ context.paramids =
+ bms_add_members(context.paramids,
+ finalize_plan(root,
+ (Plan *) lfirst(l),
+ gather_param,
+ valid_params,
+ scan_params));
+ }
+ }
+ break;
+
+ case T_BitmapOr:
+ {
+ ListCell *l;
+
+ foreach(l, ((BitmapOr *) plan)->bitmapplans)
+ {
+ context.paramids =
+ bms_add_members(context.paramids,
+ finalize_plan(root,
+ (Plan *) lfirst(l),
+ gather_param,
+ valid_params,
+ scan_params));
+ }
+ }
+ break;
+
+ case T_NestLoop:
+ {
+ ListCell *l;
+
+ finalize_primnode((Node *) ((Join *) plan)->joinqual,
+ &context);
+ /* collect set of params that will be passed to right child */
+ foreach(l, ((NestLoop *) plan)->nestParams)
+ {
+ NestLoopParam *nlp = (NestLoopParam *) lfirst(l);
+
+ nestloop_params = bms_add_member(nestloop_params,
+ nlp->paramno);
+ }
+ }
+ break;
+
+ case T_MergeJoin:
+ finalize_primnode((Node *) ((Join *) plan)->joinqual,
+ &context);
+ finalize_primnode((Node *) ((MergeJoin *) plan)->mergeclauses,
+ &context);
+ break;
+
+ case T_HashJoin:
+ finalize_primnode((Node *) ((Join *) plan)->joinqual,
+ &context);
+ finalize_primnode((Node *) ((HashJoin *) plan)->hashclauses,
+ &context);
+ break;
+
+ case T_Hash:
+ finalize_primnode((Node *) ((Hash *) plan)->hashkeys,
+ &context);
+ break;
+
+ case T_Limit:
+ finalize_primnode(((Limit *) plan)->limitOffset,
+ &context);
+ finalize_primnode(((Limit *) plan)->limitCount,
+ &context);
+ break;
+
+ case T_RecursiveUnion:
+ /* child nodes are allowed to reference wtParam */
+ locally_added_param = ((RecursiveUnion *) plan)->wtParam;
+ valid_params = bms_add_member(bms_copy(valid_params),
+ locally_added_param);
+ /* wtParam does *not* get added to scan_params */
+ break;
+
+ case T_LockRows:
+ /* Force descendant scan nodes to reference epqParam */
+ locally_added_param = ((LockRows *) plan)->epqParam;
+ valid_params = bms_add_member(bms_copy(valid_params),
+ locally_added_param);
+ scan_params = bms_add_member(bms_copy(scan_params),
+ locally_added_param);
+ break;
+
+ case T_Agg:
+ {
+ Agg *agg = (Agg *) plan;
+
+ /*
+ * AGG_HASHED plans need to know which Params are referenced
+ * in aggregate calls. Do a separate scan to identify them.
+ */
+ if (agg->aggstrategy == AGG_HASHED)
+ {
+ finalize_primnode_context aggcontext;
+
+ aggcontext.root = root;
+ aggcontext.paramids = NULL;
+ finalize_agg_primnode((Node *) agg->plan.targetlist,
+ &aggcontext);
+ finalize_agg_primnode((Node *) agg->plan.qual,
+ &aggcontext);
+ agg->aggParams = aggcontext.paramids;
+ }
+ }
+ break;
+
+ case T_WindowAgg:
+ finalize_primnode(((WindowAgg *) plan)->startOffset,
+ &context);
+ finalize_primnode(((WindowAgg *) plan)->endOffset,
+ &context);
+ break;
+
+ case T_Gather:
+ /* child nodes are allowed to reference rescan_param, if any */
+ locally_added_param = ((Gather *) plan)->rescan_param;
+ if (locally_added_param >= 0)
+ {
+ valid_params = bms_add_member(bms_copy(valid_params),
+ locally_added_param);
+
+ /*
+ * We currently don't support nested Gathers. The issue so
+ * far as this function is concerned would be how to identify
+ * which child nodes depend on which Gather.
+ */
+ Assert(gather_param < 0);
+ /* Pass down rescan_param to child parallel-aware nodes */
+ gather_param = locally_added_param;
+ }
+ /* rescan_param does *not* get added to scan_params */
+ break;
+
+ case T_GatherMerge:
+ /* child nodes are allowed to reference rescan_param, if any */
+ locally_added_param = ((GatherMerge *) plan)->rescan_param;
+ if (locally_added_param >= 0)
+ {
+ valid_params = bms_add_member(bms_copy(valid_params),
+ locally_added_param);
+
+ /*
+ * We currently don't support nested Gathers. The issue so
+ * far as this function is concerned would be how to identify
+ * which child nodes depend on which Gather.
+ */
+ Assert(gather_param < 0);
+ /* Pass down rescan_param to child parallel-aware nodes */
+ gather_param = locally_added_param;
+ }
+ /* rescan_param does *not* get added to scan_params */
+ break;
+
+ case T_Memoize:
+ finalize_primnode((Node *) ((Memoize *) plan)->param_exprs,
+ &context);
+ break;
+
+ case T_ProjectSet:
+ case T_Material:
+ case T_Sort:
+ case T_IncrementalSort:
+ case T_Unique:
+ case T_SetOp:
+ case T_Group:
+ /* no node-type-specific fields need fixing */
+ break;
+
+ default:
+ elog(ERROR, "unrecognized node type: %d",
+ (int) nodeTag(plan));
+ }
+
+ /* Process left and right child plans, if any */
+ child_params = finalize_plan(root,
+ plan->lefttree,
+ gather_param,
+ valid_params,
+ scan_params);
+ context.paramids = bms_add_members(context.paramids, child_params);
+
+ if (nestloop_params)
+ {
+ /* right child can reference nestloop_params as well as valid_params */
+ child_params = finalize_plan(root,
+ plan->righttree,
+ gather_param,
+ bms_union(nestloop_params, valid_params),
+ scan_params);
+ /* ... and they don't count as parameters used at my level */
+ child_params = bms_difference(child_params, nestloop_params);
+ bms_free(nestloop_params);
+ }
+ else
+ {
+ /* easy case */
+ child_params = finalize_plan(root,
+ plan->righttree,
+ gather_param,
+ valid_params,
+ scan_params);
+ }
+ context.paramids = bms_add_members(context.paramids, child_params);
+
+ /*
+ * Any locally generated parameter doesn't count towards its generating
+ * plan node's external dependencies. (Note: if we changed valid_params
+ * and/or scan_params, we leak those bitmapsets; not worth the notational
+ * trouble to clean them up.)
+ */
+ if (locally_added_param >= 0)
+ {
+ context.paramids = bms_del_member(context.paramids,
+ locally_added_param);
+ }
+
+ /* Now we have all the paramids referenced in this node and children */
+
+ if (!bms_is_subset(context.paramids, valid_params))
+ elog(ERROR, "plan should not reference subplan's variable");
+
+ /*
+ * The plan node's allParam and extParam fields should include all its
+ * referenced paramids, plus contributions from any child initPlans.
+ * However, any setParams of the initPlans should not be present in the
+ * parent node's extParams, only in its allParams. (It's possible that
+ * some initPlans have extParams that are setParams of other initPlans.)
+ */
+
+ /* allParam must include initplans' extParams and setParams */
+ plan->allParam = bms_union(context.paramids, initExtParam);
+ plan->allParam = bms_add_members(plan->allParam, initSetParam);
+ /* extParam must include any initplan extParams */
+ plan->extParam = bms_union(context.paramids, initExtParam);
+ /* but not any initplan setParams */
+ plan->extParam = bms_del_members(plan->extParam, initSetParam);
+
+ /*
+ * For speed at execution time, make sure extParam/allParam are actually
+ * NULL if they are empty sets.
+ */
+ if (bms_is_empty(plan->extParam))
+ plan->extParam = NULL;
+ if (bms_is_empty(plan->allParam))
+ plan->allParam = NULL;
+
+ return plan->allParam;
+}
+
+/*
+ * finalize_primnode: add IDs of all PARAM_EXEC params appearing in the given
+ * expression tree to the result set.
+ */
+static bool
+finalize_primnode(Node *node, finalize_primnode_context *context)
+{
+ if (node == NULL)
+ return false;
+ if (IsA(node, Param))
+ {
+ if (((Param *) node)->paramkind == PARAM_EXEC)
+ {
+ int paramid = ((Param *) node)->paramid;
+
+ context->paramids = bms_add_member(context->paramids, paramid);
+ }
+ return false; /* no more to do here */
+ }
+ if (IsA(node, SubPlan))
+ {
+ SubPlan *subplan = (SubPlan *) node;
+ Plan *plan = planner_subplan_get_plan(context->root, subplan);
+ ListCell *lc;
+ Bitmapset *subparamids;
+
+ /* Recurse into the testexpr, but not into the Plan */
+ finalize_primnode(subplan->testexpr, context);
+
+ /*
+ * Remove any param IDs of output parameters of the subplan that were
+ * referenced in the testexpr. These are not interesting for
+ * parameter change signaling since we always re-evaluate the subplan.
+ * Note that this wouldn't work too well if there might be uses of the
+ * same param IDs elsewhere in the plan, but that can't happen because
+ * generate_new_exec_param never tries to merge params.
+ */
+ foreach(lc, subplan->paramIds)
+ {
+ context->paramids = bms_del_member(context->paramids,
+ lfirst_int(lc));
+ }
+
+ /* Also examine args list */
+ finalize_primnode((Node *) subplan->args, context);
+
+ /*
+ * Add params needed by the subplan to paramids, but excluding those
+ * we will pass down to it. (We assume SS_finalize_plan was run on
+ * the subplan already.)
+ */
+ subparamids = bms_copy(plan->extParam);
+ foreach(lc, subplan->parParam)
+ {
+ subparamids = bms_del_member(subparamids, lfirst_int(lc));
+ }
+ context->paramids = bms_join(context->paramids, subparamids);
+
+ return false; /* no more to do here */
+ }
+ return expression_tree_walker(node, finalize_primnode,
+ (void *) context);
+}
+
+/*
+ * finalize_agg_primnode: find all Aggref nodes in the given expression tree,
+ * and add IDs of all PARAM_EXEC params appearing within their aggregated
+ * arguments to the result set.
+ */
+static bool
+finalize_agg_primnode(Node *node, finalize_primnode_context *context)
+{
+ if (node == NULL)
+ return false;
+ if (IsA(node, Aggref))
+ {
+ Aggref *agg = (Aggref *) node;
+
+ /* we should not consider the direct arguments, if any */
+ finalize_primnode((Node *) agg->args, context);
+ finalize_primnode((Node *) agg->aggfilter, context);
+ return false; /* there can't be any Aggrefs below here */
+ }
+ return expression_tree_walker(node, finalize_agg_primnode,
+ (void *) context);
+}
+
+/*
+ * SS_make_initplan_output_param - make a Param for an initPlan's output
+ *
+ * The plan is expected to return a scalar value of the given type/collation.
+ *
+ * Note that in some cases the initplan may not ever appear in the finished
+ * plan tree. If that happens, we'll have wasted a PARAM_EXEC slot, which
+ * is no big deal.
+ */
+Param *
+SS_make_initplan_output_param(PlannerInfo *root,
+ Oid resulttype, int32 resulttypmod,
+ Oid resultcollation)
+{
+ return generate_new_exec_param(root, resulttype,
+ resulttypmod, resultcollation);
+}
+
+/*
+ * SS_make_initplan_from_plan - given a plan tree, make it an InitPlan
+ *
+ * We build an EXPR_SUBLINK SubPlan node and put it into the initplan
+ * list for the outer query level. A Param that represents the initplan's
+ * output has already been assigned using SS_make_initplan_output_param.
+ */
+void
+SS_make_initplan_from_plan(PlannerInfo *root,
+ PlannerInfo *subroot, Plan *plan,
+ Param *prm)
+{
+ SubPlan *node;
+
+ /*
+ * Add the subplan and its PlannerInfo to the global lists.
+ */
+ root->glob->subplans = lappend(root->glob->subplans, plan);
+ root->glob->subroots = lappend(root->glob->subroots, subroot);
+
+ /*
+ * Create a SubPlan node and add it to the outer list of InitPlans. Note
+ * it has to appear after any other InitPlans it might depend on (see
+ * comments in ExecReScan).
+ */
+ node = makeNode(SubPlan);
+ node->subLinkType = EXPR_SUBLINK;
+ node->plan_id = list_length(root->glob->subplans);
+ node->plan_name = psprintf("InitPlan %d (returns $%d)",
+ node->plan_id, prm->paramid);
+ get_first_col_type(plan, &node->firstColType, &node->firstColTypmod,
+ &node->firstColCollation);
+ node->setParam = list_make1_int(prm->paramid);
+
+ root->init_plans = lappend(root->init_plans, node);
+
+ /*
+ * The node can't have any inputs (since it's an initplan), so the
+ * parParam and args lists remain empty.
+ */
+
+ /* Set costs of SubPlan using info from the plan tree */
+ cost_subplan(subroot, node, plan);
+}
diff --git a/src/backend/optimizer/prep/Makefile b/src/backend/optimizer/prep/Makefile
new file mode 100644
index 0000000..6f8c6c8
--- /dev/null
+++ b/src/backend/optimizer/prep/Makefile
@@ -0,0 +1,22 @@
+#-------------------------------------------------------------------------
+#
+# Makefile--
+# Makefile for optimizer/prep
+#
+# IDENTIFICATION
+# src/backend/optimizer/prep/Makefile
+#
+#-------------------------------------------------------------------------
+
+subdir = src/backend/optimizer/prep
+top_builddir = ../../../..
+include $(top_builddir)/src/Makefile.global
+
+OBJS = \
+ prepagg.o \
+ prepjointree.o \
+ prepqual.o \
+ preptlist.o \
+ prepunion.o
+
+include $(top_srcdir)/src/backend/common.mk
diff --git a/src/backend/optimizer/prep/prepagg.c b/src/backend/optimizer/prep/prepagg.c
new file mode 100644
index 0000000..404a5f1
--- /dev/null
+++ b/src/backend/optimizer/prep/prepagg.c
@@ -0,0 +1,674 @@
+/*-------------------------------------------------------------------------
+ *
+ * prepagg.c
+ * Routines to preprocess aggregate function calls
+ *
+ * If there are identical aggregate calls in the query, they only need to
+ * be computed once. Also, some aggregate functions can share the same
+ * transition state, so that we only need to call the final function for
+ * them separately. These optimizations are independent of how the
+ * aggregates are executed.
+ *
+ * preprocess_aggrefs() detects those cases, creates AggInfo and
+ * AggTransInfo structs for each aggregate and transition state that needs
+ * to be computed, and sets the 'aggno' and 'transno' fields in the Aggrefs
+ * accordingly. It also resolves polymorphic transition types, and sets
+ * the 'aggtranstype' fields accordingly.
+ *
+ * XXX: The AggInfo and AggTransInfo structs are thrown away after
+ * planning, so executor startup has to perform some of the same lookups
+ * of transition functions and initial values that we do here. One day, we
+ * might want to carry that information to the Agg nodes to save the effort
+ * at executor startup. The Agg nodes are constructed much later in the
+ * planning, however, so it's not trivial.
+ *
+ * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ * src/backend/optimizer/prep/prepagg.c
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include "access/htup_details.h"
+#include "catalog/pg_aggregate.h"
+#include "catalog/pg_type.h"
+#include "nodes/nodeFuncs.h"
+#include "nodes/pathnodes.h"
+#include "optimizer/clauses.h"
+#include "optimizer/cost.h"
+#include "optimizer/optimizer.h"
+#include "optimizer/plancat.h"
+#include "optimizer/prep.h"
+#include "parser/parse_agg.h"
+#include "utils/builtins.h"
+#include "utils/datum.h"
+#include "utils/fmgroids.h"
+#include "utils/lsyscache.h"
+#include "utils/memutils.h"
+#include "utils/syscache.h"
+
+static bool preprocess_aggrefs_walker(Node *node, PlannerInfo *root);
+static int find_compatible_agg(PlannerInfo *root, Aggref *newagg,
+ List **same_input_transnos);
+static int find_compatible_trans(PlannerInfo *root, Aggref *newagg,
+ bool shareable,
+ Oid aggtransfn, Oid aggtranstype,
+ int transtypeLen, bool transtypeByVal,
+ Oid aggcombinefn,
+ Oid aggserialfn, Oid aggdeserialfn,
+ Datum initValue, bool initValueIsNull,
+ List *transnos);
+static Datum GetAggInitVal(Datum textInitVal, Oid transtype);
+
+/* -----------------
+ * Resolve the transition type of all Aggrefs, and determine which Aggrefs
+ * can share aggregate or transition state.
+ *
+ * Information about the aggregates and transition functions is collected
+ * in the root->agginfos and root->aggtransinfos lists. The 'aggtranstype',
+ * 'aggno', and 'aggtransno' fields of each Aggref are filled in.
+ *
+ * NOTE: This modifies the Aggrefs in the input expression in-place!
+ *
+ * We try to optimize by detecting duplicate aggregate functions so that
+ * their state and final values are re-used, rather than needlessly being
+ * re-calculated independently. We also detect aggregates that are not
+ * the same, but which can share the same transition state.
+ *
+ * Scenarios:
+ *
+ * 1. Identical aggregate function calls appear in the query:
+ *
+ * SELECT SUM(x) FROM ... HAVING SUM(x) > 0
+ *
+ * Since these aggregates are identical, we only need to calculate
+ * the value once. Both aggregates will share the same 'aggno' value.
+ *
+ * 2. Two different aggregate functions appear in the query, but the
+ * aggregates have the same arguments, transition functions and
+ * initial values (and, presumably, different final functions):
+ *
+ * SELECT AVG(x), STDDEV(x) FROM ...
+ *
+ * In this case we must create a new AggInfo for the varying aggregate,
+ * and we need to call the final functions separately, but we need
+ * only run the transition function once. (This requires that the
+ * final functions be nondestructive of the transition state, but
+ * that's required anyway for other reasons.)
+ *
+ * For either of these optimizations to be valid, all aggregate properties
+ * used in the transition phase must be the same, including any modifiers
+ * such as ORDER BY, DISTINCT and FILTER, and the arguments mustn't
+ * contain any volatile functions.
+ * -----------------
+ */
+void
+preprocess_aggrefs(PlannerInfo *root, Node *clause)
+{
+ (void) preprocess_aggrefs_walker(clause, root);
+}
+
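+/*
+ * preprocess_aggref - set up AggInfo/AggTransInfo entries for one Aggref
+ *
+ * Resolves the aggregate's transition type, decides whether it can reuse an
+ * existing aggregate or transition state, and fills in the Aggref's aggno
+ * and aggtransno fields.
+ */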
+static void
+preprocess_aggref(Aggref *aggref, PlannerInfo *root)
+{
+ HeapTuple aggTuple;
+ Form_pg_aggregate aggform;
+ Oid aggtransfn;
+ Oid aggfinalfn;
+ Oid aggcombinefn;
+ Oid aggserialfn;
+ Oid aggdeserialfn;
+ Oid aggtranstype;
+ int32 aggtranstypmod;
+ int32 aggtransspace;
+ bool shareable;
+ int aggno;
+ int transno;
+ List *same_input_transnos;
+ int16 resulttypeLen;
+ bool resulttypeByVal;
+ Datum textInitVal;
+ Datum initValue;
+ bool initValueIsNull;
+ bool transtypeByVal;
+ int16 transtypeLen;
+ Oid inputTypes[FUNC_MAX_ARGS];
+ int numArguments;
+
+ Assert(aggref->agglevelsup == 0);
+
+ /*
+ * Fetch info about the aggregate from pg_aggregate. Note it's correct to
+ * ignore the moving-aggregate variant, since what we're concerned with
+ * here is aggregates, not window functions.
+ */
+ aggTuple = SearchSysCache1(AGGFNOID,
+ ObjectIdGetDatum(aggref->aggfnoid));
+ if (!HeapTupleIsValid(aggTuple))
+ elog(ERROR, "cache lookup failed for aggregate %u",
+ aggref->aggfnoid);
+ aggform = (Form_pg_aggregate) GETSTRUCT(aggTuple);
+ aggtransfn = aggform->aggtransfn;
+ aggfinalfn = aggform->aggfinalfn;
+ aggcombinefn = aggform->aggcombinefn;
+ aggserialfn = aggform->aggserialfn;
+ aggdeserialfn = aggform->aggdeserialfn;
+ aggtranstype = aggform->aggtranstype;
+ aggtransspace = aggform->aggtransspace;
+
+ /*
+ * Resolve the possibly-polymorphic aggregate transition type.
+ */
+
+ /* extract argument types (ignoring any ORDER BY expressions) */
+ numArguments = get_aggregate_argtypes(aggref, inputTypes);
+
+ /* resolve actual type of transition state, if polymorphic */
+ aggtranstype = resolve_aggregate_transtype(aggref->aggfnoid,
+ aggtranstype,
+ inputTypes,
+ numArguments);
+ aggref->aggtranstype = aggtranstype;
+
+ /*
+ * If the transition state is of the same type as the first aggregated input,
+ * assume it's the same typmod (same width) as well. This works for cases like
+ * MAX/MIN and is probably somewhat reasonable otherwise.
+ */
+ aggtranstypmod = -1;
+ if (aggref->args)
+ {
+ TargetEntry *tle = (TargetEntry *) linitial(aggref->args);
+
+ if (aggtranstype == exprType((Node *) tle->expr))
+ aggtranstypmod = exprTypmod((Node *) tle->expr);
+ }
+
+ /*
+ * If finalfn is marked read-write, we can't share transition states; but
+ * it is okay to share states for AGGMODIFY_SHAREABLE aggs.
+ *
+ * In principle, in a partial aggregate, we could share the transition
+ * state even if the final function is marked as read-write, because the
+ * partial aggregate doesn't execute the final function. But it's too
+ * early to know whether we're going to perform a partial aggregate.
+ */
+ shareable = (aggform->aggfinalmodify != AGGMODIFY_READ_WRITE);
+
+ /* get info about the output value's datatype */
+ get_typlenbyval(aggref->aggtype,
+ &resulttypeLen,
+ &resulttypeByVal);
+
+ /* get initial value */
+ textInitVal = SysCacheGetAttr(AGGFNOID, aggTuple,
+ Anum_pg_aggregate_agginitval,
+ &initValueIsNull);
+ if (initValueIsNull)
+ initValue = (Datum) 0;
+ else
+ initValue = GetAggInitVal(textInitVal, aggtranstype);
+
+ ReleaseSysCache(aggTuple);
+
+ /*
+ * 1. See if this is identical to another aggregate function call that
+ * we've seen already.
+ */
+ aggno = find_compatible_agg(root, aggref, &same_input_transnos);
+ if (aggno != -1)
+ {
+ AggInfo *agginfo = list_nth(root->agginfos, aggno);
+
+ transno = agginfo->transno;
+ }
+ else
+ {
+ AggInfo *agginfo = palloc(sizeof(AggInfo));
+
+ agginfo->finalfn_oid = aggfinalfn;
+ agginfo->representative_aggref = aggref;
+ agginfo->shareable = shareable;
+
+ aggno = list_length(root->agginfos);
+ root->agginfos = lappend(root->agginfos, agginfo);
+
+ /*
+ * Count it, and check for cases requiring ordered input. Note that
+ * ordered-set aggs always have nonempty aggorder. Any ordered-input
+ * case also defeats partial aggregation.
+ */
+ if (aggref->aggorder != NIL || aggref->aggdistinct != NIL)
+ {
+ root->numOrderedAggs++;
+ root->hasNonPartialAggs = true;
+ }
+
+ get_typlenbyval(aggtranstype,
+ &transtypeLen,
+ &transtypeByVal);
+
+ /*
+ * 2. See if this aggregate can share transition state with another
+ * aggregate that we've initialized already.
+ */
+ transno = find_compatible_trans(root, aggref, shareable,
+ aggtransfn, aggtranstype,
+ transtypeLen, transtypeByVal,
+ aggcombinefn,
+ aggserialfn, aggdeserialfn,
+ initValue, initValueIsNull,
+ same_input_transnos);
+ if (transno == -1)
+ {
+ AggTransInfo *transinfo = palloc(sizeof(AggTransInfo));
+
+ transinfo->args = aggref->args;
+ transinfo->aggfilter = aggref->aggfilter;
+ transinfo->transfn_oid = aggtransfn;
+ transinfo->combinefn_oid = aggcombinefn;
+ transinfo->serialfn_oid = aggserialfn;
+ transinfo->deserialfn_oid = aggdeserialfn;
+ transinfo->aggtranstype = aggtranstype;
+ transinfo->aggtranstypmod = aggtranstypmod;
+ transinfo->transtypeLen = transtypeLen;
+ transinfo->transtypeByVal = transtypeByVal;
+ transinfo->aggtransspace = aggtransspace;
+ transinfo->initValue = initValue;
+ transinfo->initValueIsNull = initValueIsNull;
+
+ transno = list_length(root->aggtransinfos);
+ root->aggtransinfos = lappend(root->aggtransinfos, transinfo);
+
+ /*
+ * Check whether partial aggregation is feasible, unless we
+ * already found out that we can't do it.
+ */
+ if (!root->hasNonPartialAggs)
+ {
+ /*
+ * If there is no combine function, then partial aggregation
+ * is not possible.
+ */
+ if (!OidIsValid(transinfo->combinefn_oid))
+ root->hasNonPartialAggs = true;
+
+ /*
+ * If we have any aggs with transtype INTERNAL then we must
+ * check whether they have serialization/deserialization
+ * functions; if not, we can't serialize partial-aggregation
+ * results.
+ */
+ else if (transinfo->aggtranstype == INTERNALOID &&
+ (!OidIsValid(transinfo->serialfn_oid) ||
+ !OidIsValid(transinfo->deserialfn_oid)))
+ root->hasNonSerialAggs = true;
+ }
+ }
+ agginfo->transno = transno;
+ }
+
+ /*
+ * Fill in the fields in the Aggref (aggtranstype was set above already)
+ */
+ aggref->aggno = aggno;
+ aggref->aggtransno = transno;
+}
+
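+/*
+ * Expression tree walker that applies preprocess_aggref to each Aggref.
+ */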
+static bool
+preprocess_aggrefs_walker(Node *node, PlannerInfo *root)
+{
+ if (node == NULL)
+ return false;
+ if (IsA(node, Aggref))
+ {
+ Aggref *aggref = (Aggref *) node;
+
+ preprocess_aggref(aggref, root);
+
+ /*
+ * We assume that the parser checked that there are no aggregates (of
+ * this level anyway) in the aggregated arguments, direct arguments,
+ * or filter clause. Hence, we need not recurse into any of them.
+ */
+ return false;
+ }
+ Assert(!IsA(node, SubLink));
+ return expression_tree_walker(node, preprocess_aggrefs_walker,
+ (void *) root);
+}
+
+
+/*
+ * find_compatible_agg - search for a previously initialized per-Agg struct
+ *
+ * Searches the previously seen aggregates to find one that is compatible
+ * with this one, with the same input parameters. If no compatible aggregate
+ * can be found, returns -1.
+ *
+ * As a side-effect, this also collects a list of existing, shareable per-Trans
+ * structs with matching inputs. If no identical Aggref is found, the list is
+ * passed later to find_compatible_trans, to see if we can at least reuse
+ * the state value of another aggregate.
+ */
+static int
+find_compatible_agg(PlannerInfo *root, Aggref *newagg,
+ List **same_input_transnos)
+{
+ ListCell *lc;
+ int aggno;
+
+ *same_input_transnos = NIL;
+
+ /* we mustn't reuse the aggref if it contains volatile function calls */
+ if (contain_volatile_functions((Node *) newagg))
+ return -1;
+
+ /*
+ * Search through the list of already seen aggregates. If we find an
+ * existing identical aggregate call, then we can re-use that one. While
+ * searching, we'll also collect a list of Aggrefs with the same input
+ * parameters. If no matching Aggref is found, the caller can potentially
+ * still re-use the transition state of one of them. (At this stage we
+ * just compare the parsetrees; whether different aggregates share the
+ * same transition function will be checked later.)
+ */
+ aggno = -1;
+ foreach(lc, root->agginfos)
+ {
+ AggInfo *agginfo = (AggInfo *) lfirst(lc);
+ Aggref *existingRef;
+
+ aggno++;
+
+ existingRef = agginfo->representative_aggref;
+
+ /* all of the following must be the same or it's no match */
+ if (newagg->inputcollid != existingRef->inputcollid ||
+ newagg->aggtranstype != existingRef->aggtranstype ||
+ newagg->aggstar != existingRef->aggstar ||
+ newagg->aggvariadic != existingRef->aggvariadic ||
+ newagg->aggkind != existingRef->aggkind ||
+ !equal(newagg->args, existingRef->args) ||
+ !equal(newagg->aggorder, existingRef->aggorder) ||
+ !equal(newagg->aggdistinct, existingRef->aggdistinct) ||
+ !equal(newagg->aggfilter, existingRef->aggfilter))
+ continue;
+
+ /* if it's the same aggregate function then report exact match */
+ if (newagg->aggfnoid == existingRef->aggfnoid &&
+ newagg->aggtype == existingRef->aggtype &&
+ newagg->aggcollid == existingRef->aggcollid &&
+ equal(newagg->aggdirectargs, existingRef->aggdirectargs))
+ {
+ list_free(*same_input_transnos);
+ *same_input_transnos = NIL;
+ return aggno;
+ }
+
+ /*
+ * Not identical, but it had the same inputs. If the final function
+ * permits sharing, return its transno to the caller, in case we can
+ * re-use its per-trans state. (If there's already sharing going on,
+ * we might report a transno more than once. find_compatible_trans is
+ * cheap enough that it's not worth spending cycles to avoid that.)
+ */
+ if (agginfo->shareable)
+ *same_input_transnos = lappend_int(*same_input_transnos,
+ agginfo->transno);
+ }
+
+ return -1;
+}
+
+/*
+ * find_compatible_trans - search for a previously initialized per-Trans
+ * struct
+ *
+ * Searches the list of transnos for a per-Trans struct with the same
+ * transition function and initial condition. (The inputs have already been
+ * verified to match.)
+ */
+static int
+find_compatible_trans(PlannerInfo *root, Aggref *newagg, bool shareable,
+ Oid aggtransfn, Oid aggtranstype,
+ int transtypeLen, bool transtypeByVal,
+ Oid aggcombinefn,
+ Oid aggserialfn, Oid aggdeserialfn,
+ Datum initValue, bool initValueIsNull,
+ List *transnos)
+{
+ ListCell *lc;
+
+ /* If this aggregate can't share transition states, give up */
+ if (!shareable)
+ return -1;
+
+ foreach(lc, transnos)
+ {
+ int transno = lfirst_int(lc);
+ AggTransInfo *pertrans = (AggTransInfo *) list_nth(root->aggtransinfos, transno);
+
+ /*
+ * if the transfns or transition state types are not the same then the
+ * state can't be shared.
+ */
+ if (aggtransfn != pertrans->transfn_oid ||
+ aggtranstype != pertrans->aggtranstype)
+ continue;
+
+ /*
+ * The serialization and deserialization functions must match, if
+ * present, as we're unable to share the trans state for aggregates
+ * which will serialize or deserialize into different formats.
+ * Remember that these will be InvalidOid if they're not required for
+ * this agg node.
+ */
+ if (aggserialfn != pertrans->serialfn_oid ||
+ aggdeserialfn != pertrans->deserialfn_oid)
+ continue;
+
+ /*
+ * Combine function must also match. We only care about the combine
+ * function with partial aggregates, but it's too early in the
+ * planning to know if we will do partial aggregation, so be
+ * conservative.
+ */
+ if (aggcombinefn != pertrans->combinefn_oid)
+ continue;
+
+ /*
+ * Check that the initial condition matches, too.
+ */
+ if (initValueIsNull && pertrans->initValueIsNull)
+ return transno;
+
+ if (!initValueIsNull && !pertrans->initValueIsNull &&
+ datumIsEqual(initValue, pertrans->initValue,
+ transtypeByVal, transtypeLen))
+ return transno;
+ }
+ return -1;
+}
+
+static Datum
+GetAggInitVal(Datum textInitVal, Oid transtype)
+{
+ Oid typinput,
+ typioparam;
+ char *strInitVal;
+ Datum initVal;
+
+ getTypeInputInfo(transtype, &typinput, &typioparam);
+ strInitVal = TextDatumGetCString(textInitVal);
+ initVal = OidInputFunctionCall(typinput, strInitVal,
+ typioparam, -1);
+ pfree(strInitVal);
+ return initVal;
+}
+
+
+/*
+ * get_agg_clause_costs
+ * Process the PlannerInfo's 'aggtransinfos' and 'agginfos' lists
+ * accumulating the cost information about them.
+ *
+ * 'aggsplit' tells us the expected partial-aggregation mode, which affects
+ * the cost estimates.
+ *
+ * NOTE that the costs are ADDED to those already in *costs ... so the caller
+ * is responsible for zeroing the struct initially.
+ *
+ * For each AggTransInfo, we add the cost of an aggregate transition using
+ * either the transfn or the combinefn, depending on the 'aggsplit' value.
+ * We also account for the costs of any aggfilters and of any serialization
+ * and deserialization of the transition state.  In addition, we estimate
+ * the total space needed for the transition states as if each aggregate's
+ * state were stored in memory concurrently (as would be done in a HashAgg
+ * plan).
+ *
+ * For each AggInfo in the 'agginfos' list we add the cost of running the
+ * final function and the direct args, if any.
+ */
+void
+get_agg_clause_costs(PlannerInfo *root, AggSplit aggsplit, AggClauseCosts *costs)
+{
+ ListCell *lc;
+
+ foreach(lc, root->aggtransinfos)
+ {
+ AggTransInfo *transinfo = (AggTransInfo *) lfirst(lc);
+
+ /*
+ * Add the appropriate component function execution costs to
+ * appropriate totals.
+ */
+ if (DO_AGGSPLIT_COMBINE(aggsplit))
+ {
+ /* charge for combining previously aggregated states */
+ add_function_cost(root, transinfo->combinefn_oid, NULL,
+ &costs->transCost);
+ }
+ else
+ add_function_cost(root, transinfo->transfn_oid, NULL,
+ &costs->transCost);
+ if (DO_AGGSPLIT_DESERIALIZE(aggsplit) &&
+ OidIsValid(transinfo->deserialfn_oid))
+ add_function_cost(root, transinfo->deserialfn_oid, NULL,
+ &costs->transCost);
+ if (DO_AGGSPLIT_SERIALIZE(aggsplit) &&
+ OidIsValid(transinfo->serialfn_oid))
+ add_function_cost(root, transinfo->serialfn_oid, NULL,
+ &costs->finalCost);
+
+ /*
+ * These costs are incurred only by the initial aggregate node, so we
+ * mustn't include them again at upper levels.
+ */
+ if (!DO_AGGSPLIT_COMBINE(aggsplit))
+ {
+ /* add the input expressions' cost to per-input-row costs */
+ QualCost argcosts;
+
+ cost_qual_eval_node(&argcosts, (Node *) transinfo->args, root);
+ costs->transCost.startup += argcosts.startup;
+ costs->transCost.per_tuple += argcosts.per_tuple;
+
+ /*
+ * Add any filter's cost to per-input-row costs.
+ *
+ * XXX Ideally we should reduce input expression costs according
+ * to filter selectivity, but it's not clear it's worth the
+ * trouble.
+ */
+ if (transinfo->aggfilter)
+ {
+ cost_qual_eval_node(&argcosts, (Node *) transinfo->aggfilter,
+ root);
+ costs->transCost.startup += argcosts.startup;
+ costs->transCost.per_tuple += argcosts.per_tuple;
+ }
+ }
+
+ /*
+ * If the transition type is pass-by-value then it doesn't add
+ * anything to the required size of the hashtable. If it is
+ * pass-by-reference then we have to add the estimated size of the
+ * value itself, plus palloc overhead.
+ */
+ if (!transinfo->transtypeByVal)
+ {
+ int32 avgwidth;
+
+ /* Use average width if aggregate definition gave one */
+ if (transinfo->aggtransspace > 0)
+ avgwidth = transinfo->aggtransspace;
+ else if (transinfo->transfn_oid == F_ARRAY_APPEND)
+ {
+ /*
+ * If the transition function is array_append(), it'll use an
+ * expanded array as transvalue, which will occupy at least
+ * ALLOCSET_SMALL_INITSIZE and possibly more. Use that as the
+ * estimate for lack of a better idea.
+ */
+ avgwidth = ALLOCSET_SMALL_INITSIZE;
+ }
+ else
+ {
+ avgwidth = get_typavgwidth(transinfo->aggtranstype, transinfo->aggtranstypmod);
+ }
+
+ avgwidth = MAXALIGN(avgwidth);
+ costs->transitionSpace += avgwidth + 2 * sizeof(void *);
+ }
+ else if (transinfo->aggtranstype == INTERNALOID)
+ {
+ /*
+ * INTERNAL transition type is a special case: although INTERNAL
+ * is pass-by-value, it's almost certainly being used as a pointer
+ * to some large data structure. The aggregate definition can
+ * provide an estimate of the size. If it doesn't, then we assume
+ * ALLOCSET_DEFAULT_INITSIZE, which is a good guess if the data is
+ * being kept in a private memory context, as is done by
+ * array_agg() for instance.
+ */
+ if (transinfo->aggtransspace > 0)
+ costs->transitionSpace += transinfo->aggtransspace;
+ else
+ costs->transitionSpace += ALLOCSET_DEFAULT_INITSIZE;
+ }
+ }
+
+ foreach(lc, root->agginfos)
+ {
+ AggInfo *agginfo = (AggInfo *) lfirst(lc);
+ Aggref *aggref = agginfo->representative_aggref;
+
+ /*
+ * Add the appropriate component function execution costs to
+ * appropriate totals.
+ */
+ if (!DO_AGGSPLIT_SKIPFINAL(aggsplit) &&
+ OidIsValid(agginfo->finalfn_oid))
+ add_function_cost(root, agginfo->finalfn_oid, NULL,
+ &costs->finalCost);
+
+ /*
+ * If there are direct arguments, treat their evaluation cost like the
+ * cost of the finalfn.
+ */
+ if (aggref->aggdirectargs)
+ {
+ QualCost argcosts;
+
+ cost_qual_eval_node(&argcosts, (Node *) aggref->aggdirectargs,
+ root);
+ costs->finalCost.startup += argcosts.startup;
+ costs->finalCost.per_tuple += argcosts.per_tuple;
+ }
+ }
+}
diff --git a/src/backend/optimizer/prep/prepjointree.c b/src/backend/optimizer/prep/prepjointree.c
new file mode 100644
index 0000000..7f4bb7b
--- /dev/null
+++ b/src/backend/optimizer/prep/prepjointree.c
@@ -0,0 +1,3746 @@
+/*-------------------------------------------------------------------------
+ *
+ * prepjointree.c
+ * Planner preprocessing for subqueries and join tree manipulation.
+ *
+ * NOTE: the intended sequence for invoking these operations is
+ * replace_empty_jointree
+ * pull_up_sublinks
+ * preprocess_function_rtes
+ * pull_up_subqueries
+ * flatten_simple_union_all
+ * do expression preprocessing (including flattening JOIN alias vars)
+ * reduce_outer_joins
+ * remove_useless_result_rtes
+ *
+ *
+ * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ * src/backend/optimizer/prep/prepjointree.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "catalog/pg_type.h"
+#include "funcapi.h"
+#include "miscadmin.h"
+#include "nodes/makefuncs.h"
+#include "nodes/nodeFuncs.h"
+#include "optimizer/clauses.h"
+#include "optimizer/optimizer.h"
+#include "optimizer/placeholder.h"
+#include "optimizer/prep.h"
+#include "optimizer/subselect.h"
+#include "optimizer/tlist.h"
+#include "parser/parse_relation.h"
+#include "parser/parsetree.h"
+#include "rewrite/rewriteManip.h"
+
+
+typedef struct pullup_replace_vars_context
+{
+ PlannerInfo *root;
+ List *targetlist; /* tlist of subquery being pulled up */
+ RangeTblEntry *target_rte; /* RTE of subquery */
+ Relids relids; /* relids within subquery, as numbered after
+ * pullup (set only if target_rte->lateral) */
+ bool *outer_hasSubLinks; /* -> outer query's hasSubLinks */
+ int varno; /* varno of subquery */
+ bool need_phvs; /* do we need PlaceHolderVars? */
+ bool wrap_non_vars; /* do we need 'em on *all* non-Vars? */
+ Node **rv_cache; /* cache for results with PHVs */
+} pullup_replace_vars_context;
+
+typedef struct reduce_outer_joins_state
+{
+ Relids relids; /* base relids within this subtree */
+ bool contains_outer; /* does subtree contain outer join(s)? */
+ List *sub_states; /* List of states for subtree components */
+} reduce_outer_joins_state;
+
+static Node *pull_up_sublinks_jointree_recurse(PlannerInfo *root, Node *jtnode,
+ Relids *relids);
+static Node *pull_up_sublinks_qual_recurse(PlannerInfo *root, Node *node,
+ Node **jtlink1, Relids available_rels1,
+ Node **jtlink2, Relids available_rels2);
+static Node *pull_up_subqueries_recurse(PlannerInfo *root, Node *jtnode,
+ JoinExpr *lowest_outer_join,
+ JoinExpr *lowest_nulling_outer_join,
+ AppendRelInfo *containing_appendrel);
+static Node *pull_up_simple_subquery(PlannerInfo *root, Node *jtnode,
+ RangeTblEntry *rte,
+ JoinExpr *lowest_outer_join,
+ JoinExpr *lowest_nulling_outer_join,
+ AppendRelInfo *containing_appendrel);
+static Node *pull_up_simple_union_all(PlannerInfo *root, Node *jtnode,
+ RangeTblEntry *rte);
+static void pull_up_union_leaf_queries(Node *setOp, PlannerInfo *root,
+ int parentRTindex, Query *setOpQuery,
+ int childRToffset);
+static void make_setop_translation_list(Query *query, int newvarno,
+ AppendRelInfo *appinfo);
+static bool is_simple_subquery(PlannerInfo *root, Query *subquery,
+ RangeTblEntry *rte,
+ JoinExpr *lowest_outer_join);
+static Node *pull_up_simple_values(PlannerInfo *root, Node *jtnode,
+ RangeTblEntry *rte);
+static bool is_simple_values(PlannerInfo *root, RangeTblEntry *rte);
+static Node *pull_up_constant_function(PlannerInfo *root, Node *jtnode,
+ RangeTblEntry *rte,
+ JoinExpr *lowest_nulling_outer_join,
+ AppendRelInfo *containing_appendrel);
+static bool is_simple_union_all(Query *subquery);
+static bool is_simple_union_all_recurse(Node *setOp, Query *setOpQuery,
+ List *colTypes);
+static bool is_safe_append_member(Query *subquery);
+static bool jointree_contains_lateral_outer_refs(PlannerInfo *root,
+ Node *jtnode, bool restricted,
+ Relids safe_upper_varnos);
+static void perform_pullup_replace_vars(PlannerInfo *root,
+ pullup_replace_vars_context *rvcontext,
+ JoinExpr *lowest_nulling_outer_join,
+ AppendRelInfo *containing_appendrel);
+static void replace_vars_in_jointree(Node *jtnode,
+ pullup_replace_vars_context *context,
+ JoinExpr *lowest_nulling_outer_join);
+static Node *pullup_replace_vars(Node *expr,
+ pullup_replace_vars_context *context);
+static Node *pullup_replace_vars_callback(Var *var,
+ replace_rte_variables_context *context);
+static Query *pullup_replace_vars_subquery(Query *query,
+ pullup_replace_vars_context *context);
+static reduce_outer_joins_state *reduce_outer_joins_pass1(Node *jtnode);
+static void reduce_outer_joins_pass2(Node *jtnode,
+ reduce_outer_joins_state *state,
+ PlannerInfo *root,
+ Relids nonnullable_rels,
+ List *nonnullable_vars,
+ List *forced_null_vars);
+static Node *remove_useless_results_recurse(PlannerInfo *root, Node *jtnode);
+static int get_result_relid(PlannerInfo *root, Node *jtnode);
+static void remove_result_refs(PlannerInfo *root, int varno, Node *newjtloc);
+static bool find_dependent_phvs(PlannerInfo *root, int varno);
+static bool find_dependent_phvs_in_jointree(PlannerInfo *root,
+ Node *node, int varno);
+static void substitute_phv_relids(Node *node,
+ int varno, Relids subrelids);
+static void fix_append_rel_relids(List *append_rel_list, int varno,
+ Relids subrelids);
+static Node *find_jointree_node_for_rel(Node *jtnode, int relid);
+
+
+/*
+ * transform_MERGE_to_join
+ * Rewrite a MERGE's jointree so that it also includes the target relation.
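+ *
+ * If any WHEN NOT MATCHED clause is present, we build (in effect)
+ *     target RIGHT JOIN source ON join-condition
+ * so that unmatched source rows are still produced; otherwise a plain
+ * inner join suffices.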
+ */
+void
+transform_MERGE_to_join(Query *parse)
+{
+ RangeTblEntry *joinrte;
+ JoinExpr *joinexpr;
+ JoinType jointype;
+ int joinrti;
+ List *vars;
+
+ if (parse->commandType != CMD_MERGE)
+ return;
+
+ /* XXX probably bogus */
+ vars = NIL;
+
+ /*
+ * When any WHEN NOT MATCHED THEN INSERT clauses exist, we need to use an
+ * outer join so that we process all unmatched tuples from the source
+ * relation. If none exist, we can use an inner join.
+ */
+ if (parse->mergeUseOuterJoin)
+ jointype = JOIN_RIGHT;
+ else
+ jointype = JOIN_INNER;
+
+ /* Manufacture a join RTE to use. */
+ joinrte = makeNode(RangeTblEntry);
+ joinrte->rtekind = RTE_JOIN;
+ joinrte->jointype = jointype;
+ joinrte->joinmergedcols = 0;
+ joinrte->joinaliasvars = vars;
+ joinrte->joinleftcols = NIL; /* MERGE does not allow JOIN USING */
+ joinrte->joinrightcols = NIL; /* ditto */
+ joinrte->join_using_alias = NULL;
+
+ joinrte->alias = NULL;
+ joinrte->eref = makeAlias("*MERGE*", NIL);
+ joinrte->lateral = false;
+ joinrte->inh = false;
+ joinrte->inFromCl = true;
+ joinrte->requiredPerms = 0;
+ joinrte->checkAsUser = InvalidOid;
+ joinrte->selectedCols = NULL;
+ joinrte->insertedCols = NULL;
+ joinrte->updatedCols = NULL;
+ joinrte->extraUpdatedCols = NULL;
+ joinrte->securityQuals = NIL;
+
+ /*
+ * Add the completed RTE to the query's range table list, so that we know
+ * its index.
+ */
+ parse->rtable = lappend(parse->rtable, joinrte);
+ joinrti = list_length(parse->rtable);
+
+ /*
+ * Create a JOIN between the target and the source relation.
+ */
+ joinexpr = makeNode(JoinExpr);
+ joinexpr->jointype = jointype;
+ joinexpr->isNatural = false;
+ joinexpr->larg = (Node *) makeNode(RangeTblRef);
+ ((RangeTblRef *) joinexpr->larg)->rtindex = parse->resultRelation;
+ joinexpr->rarg = linitial(parse->jointree->fromlist); /* original join */
+ joinexpr->usingClause = NIL;
+ joinexpr->join_using_alias = NULL;
+ /* The quals are moved from the jointree into this specific join */
+ joinexpr->quals = parse->jointree->quals;
+ joinexpr->alias = NULL;
+ joinexpr->rtindex = joinrti;
+
+ /* Make the new join be the sole entry in the query's jointree */
+ parse->jointree->fromlist = list_make1(joinexpr);
+ parse->jointree->quals = NULL;
+}
+
+/*
+ * replace_empty_jointree
+ * If the Query's jointree is empty, replace it with a dummy RTE_RESULT
+ * relation.
+ *
+ * By doing this, we can avoid a bunch of corner cases that formerly existed
+ * for SELECTs with omitted FROM clauses. An example is that a subquery
+ * with empty jointree previously could not be pulled up, because that would
+ * have resulted in an empty relid set, making the subquery not uniquely
+ * identifiable for join or PlaceHolderVar processing.
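+ *
+ * Thus, for example, "SELECT 2+2" acquires a jointree containing a single
+ * RTE_RESULT relation, which simply emits one row.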
+ *
+ * Unlike most other functions in this file, this function doesn't recurse;
+ * we rely on other processing to invoke it on sub-queries at suitable times.
+ */
+void
+replace_empty_jointree(Query *parse)
+{
+ RangeTblEntry *rte;
+ Index rti;
+ RangeTblRef *rtr;
+
+ /* Nothing to do if jointree is already nonempty */
+ if (parse->jointree->fromlist != NIL)
+ return;
+
+ /* We mustn't change it in the top level of a setop tree, either */
+ if (parse->setOperations)
+ return;
+
+ /* Create suitable RTE */
+ rte = makeNode(RangeTblEntry);
+ rte->rtekind = RTE_RESULT;
+ rte->eref = makeAlias("*RESULT*", NIL);
+
+ /* Add it to rangetable */
+ parse->rtable = lappend(parse->rtable, rte);
+ rti = list_length(parse->rtable);
+
+ /* And jam a reference into the jointree */
+ rtr = makeNode(RangeTblRef);
+ rtr->rtindex = rti;
+ parse->jointree->fromlist = list_make1(rtr);
+}
+
+/*
+ * pull_up_sublinks
+ * Attempt to pull up ANY and EXISTS SubLinks to be treated as
+ * semijoins or anti-semijoins.
+ *
+ * A clause "foo op ANY (sub-SELECT)" can be processed by pulling the
+ * sub-SELECT up to become a rangetable entry and treating the implied
+ * comparisons as quals of a semijoin. However, this optimization *only*
+ * works at the top level of WHERE or a JOIN/ON clause, because we cannot
+ * distinguish whether the ANY ought to return FALSE or NULL in cases
+ * involving NULL inputs. Also, in an outer join's ON clause we can only
+ * do this if the sublink is degenerate (ie, references only the nullable
+ * side of the join). In that case it is legal to push the semijoin
+ * down into the nullable side of the join. If the sublink references any
+ * nonnullable-side variables then it would have to be evaluated as part
+ * of the outer join, which makes things way too complicated.
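+ *
+ * For example, "SELECT ... FROM foo WHERE foo.x IN (SELECT y FROM sub)" is
+ * converted, in effect, into a semijoin between foo and sub with
+ * "foo.x = sub.y" as the join qual.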
+ *
+ * Under similar conditions, EXISTS and NOT EXISTS clauses can be handled
+ * by pulling up the sub-SELECT and creating a semijoin or anti-semijoin.
+ *
+ * This routine searches for such clauses and does the necessary parsetree
+ * transformations if any are found.
+ *
+ * This routine has to run before preprocess_expression(), so the quals
+ * clauses are not yet reduced to implicit-AND format, and are not guaranteed
+ * to be AND/OR-flat either. That means we need to recursively search through
+ * explicit AND clauses. We stop as soon as we hit a non-AND item.
+ */
+void
+pull_up_sublinks(PlannerInfo *root)
+{
+ Node *jtnode;
+ Relids relids;
+
+ /* Begin recursion through the jointree */
+ jtnode = pull_up_sublinks_jointree_recurse(root,
+ (Node *) root->parse->jointree,
+ &relids);
+
+ /*
+ * root->parse->jointree must always be a FromExpr, so insert a dummy one
+ * if we got a bare RangeTblRef or JoinExpr out of the recursion.
+ */
+ if (IsA(jtnode, FromExpr))
+ root->parse->jointree = (FromExpr *) jtnode;
+ else
+ root->parse->jointree = makeFromExpr(list_make1(jtnode), NULL);
+}
+
+/*
+ * Recurse through jointree nodes for pull_up_sublinks()
+ *
+ * In addition to returning the possibly-modified jointree node, we return
+ * a relids set of the contained rels into *relids.
+ */
+static Node *
+pull_up_sublinks_jointree_recurse(PlannerInfo *root, Node *jtnode,
+ Relids *relids)
+{
+ /* Since this function recurses, it could be driven to stack overflow. */
+ check_stack_depth();
+
+ if (jtnode == NULL)
+ {
+ *relids = NULL;
+ }
+ else if (IsA(jtnode, RangeTblRef))
+ {
+ int varno = ((RangeTblRef *) jtnode)->rtindex;
+
+ *relids = bms_make_singleton(varno);
+ /* jtnode is returned unmodified */
+ }
+ else if (IsA(jtnode, FromExpr))
+ {
+ FromExpr *f = (FromExpr *) jtnode;
+ List *newfromlist = NIL;
+ Relids frelids = NULL;
+ FromExpr *newf;
+ Node *jtlink;
+ ListCell *l;
+
+ /* First, recurse to process children and collect their relids */
+ foreach(l, f->fromlist)
+ {
+ Node *newchild;
+ Relids childrelids;
+
+ newchild = pull_up_sublinks_jointree_recurse(root,
+ lfirst(l),
+ &childrelids);
+ newfromlist = lappend(newfromlist, newchild);
+ frelids = bms_join(frelids, childrelids);
+ }
+ /* Build the replacement FromExpr; no quals yet */
+ newf = makeFromExpr(newfromlist, NULL);
+ /* Set up a link representing the rebuilt jointree */
+ jtlink = (Node *) newf;
+ /* Now process qual --- all children are available for use */
+ newf->quals = pull_up_sublinks_qual_recurse(root, f->quals,
+ &jtlink, frelids,
+ NULL, NULL);
+
+ /*
+ * Note that the result will be either newf, or a stack of JoinExprs
+ * with newf at the base. We rely on subsequent optimization steps to
+ * flatten this and rearrange the joins as needed.
+ *
+ * Although we could include the pulled-up subqueries in the returned
+ * relids, there's no need since upper quals couldn't refer to their
+ * outputs anyway.
+ */
+ *relids = frelids;
+ jtnode = jtlink;
+ }
+ else if (IsA(jtnode, JoinExpr))
+ {
+ JoinExpr *j;
+ Relids leftrelids;
+ Relids rightrelids;
+ Node *jtlink;
+
+ /*
+ * Make a modifiable copy of join node, but don't bother copying its
+ * subnodes (yet).
+ */
+ j = (JoinExpr *) palloc(sizeof(JoinExpr));
+ memcpy(j, jtnode, sizeof(JoinExpr));
+ jtlink = (Node *) j;
+
+ /* Recurse to process children and collect their relids */
+ j->larg = pull_up_sublinks_jointree_recurse(root, j->larg,
+ &leftrelids);
+ j->rarg = pull_up_sublinks_jointree_recurse(root, j->rarg,
+ &rightrelids);
+
+ /*
+ * Now process qual, showing appropriate child relids as available,
+ * and attach any pulled-up jointree items at the right place. In the
+ * inner-join case we put new JoinExprs above the existing one (much
+ * as for a FromExpr-style join). In outer-join cases the new
+ * JoinExprs must go into the nullable side of the outer join. The
+ * point of the available_rels machinations is to ensure that we only
+ * pull up quals for which that's okay.
+ *
+ * We don't expect to see any pre-existing JOIN_SEMI or JOIN_ANTI
+ * nodes here.
+ */
+ switch (j->jointype)
+ {
+ case JOIN_INNER:
+ j->quals = pull_up_sublinks_qual_recurse(root, j->quals,
+ &jtlink,
+ bms_union(leftrelids,
+ rightrelids),
+ NULL, NULL);
+ break;
+ case JOIN_LEFT:
+ j->quals = pull_up_sublinks_qual_recurse(root, j->quals,
+ &j->rarg,
+ rightrelids,
+ NULL, NULL);
+ break;
+ case JOIN_FULL:
+ /* can't do anything with full-join quals */
+ break;
+ case JOIN_RIGHT:
+ j->quals = pull_up_sublinks_qual_recurse(root, j->quals,
+ &j->larg,
+ leftrelids,
+ NULL, NULL);
+ break;
+ default:
+ elog(ERROR, "unrecognized join type: %d",
+ (int) j->jointype);
+ break;
+ }
+
+ /*
+ * Although we could include the pulled-up subqueries in the returned
+ * relids, there's no need since upper quals couldn't refer to their
+ * outputs anyway. But we *do* need to include the join's own rtindex
+ * because we haven't yet collapsed join alias variables, so upper
+ * levels would mistakenly think they couldn't use references to this
+ * join.
+ */
+ *relids = bms_join(leftrelids, rightrelids);
+ if (j->rtindex)
+ *relids = bms_add_member(*relids, j->rtindex);
+ jtnode = jtlink;
+ }
+ else
+ elog(ERROR, "unrecognized node type: %d",
+ (int) nodeTag(jtnode));
+ return jtnode;
+}
+
+/*
+ * Recurse through top-level qual nodes for pull_up_sublinks()
+ *
+ * jtlink1 points to the link in the jointree where any new JoinExprs should
+ * be inserted if they reference available_rels1 (i.e., available_rels1
+ * denotes the relations present underneath jtlink1). Optionally, jtlink2 can
+ * point to a second link where new JoinExprs should be inserted if they
+ * reference available_rels2 (pass NULL for both those arguments if not used).
+ * Note that SubLinks referencing both sets of variables cannot be optimized.
+ * If we find multiple pull-up-able SubLinks, they'll get stacked onto jtlink1
+ * and/or jtlink2 in the order we encounter them. We rely on subsequent
+ * optimization to rearrange the stack if appropriate.
+ *
+ * Returns the replacement qual node, or NULL if the qual should be removed.
+ */
+static Node *
+pull_up_sublinks_qual_recurse(PlannerInfo *root, Node *node,
+ Node **jtlink1, Relids available_rels1,
+ Node **jtlink2, Relids available_rels2)
+{
+ if (node == NULL)
+ return NULL;
+ if (IsA(node, SubLink))
+ {
+ SubLink *sublink = (SubLink *) node;
+ JoinExpr *j;
+ Relids child_rels;
+
+ /* Is it a convertible ANY or EXISTS clause? */
+ if (sublink->subLinkType == ANY_SUBLINK)
+ {
+ if ((j = convert_ANY_sublink_to_join(root, sublink,
+ available_rels1)) != NULL)
+ {
+ /* Yes; insert the new join node into the join tree */
+ j->larg = *jtlink1;
+ *jtlink1 = (Node *) j;
+ /* Recursively process pulled-up jointree nodes */
+ j->rarg = pull_up_sublinks_jointree_recurse(root,
+ j->rarg,
+ &child_rels);
+
+ /*
+ * Now recursively process the pulled-up quals. Any inserted
+ * joins can get stacked onto either j->larg or j->rarg,
+ * depending on which rels they reference.
+ */
+ j->quals = pull_up_sublinks_qual_recurse(root,
+ j->quals,
+ &j->larg,
+ available_rels1,
+ &j->rarg,
+ child_rels);
+ /* Return NULL representing constant TRUE */
+ return NULL;
+ }
+ if (available_rels2 != NULL &&
+ (j = convert_ANY_sublink_to_join(root, sublink,
+ available_rels2)) != NULL)
+ {
+ /* Yes; insert the new join node into the join tree */
+ j->larg = *jtlink2;
+ *jtlink2 = (Node *) j;
+ /* Recursively process pulled-up jointree nodes */
+ j->rarg = pull_up_sublinks_jointree_recurse(root,
+ j->rarg,
+ &child_rels);
+
+ /*
+ * Now recursively process the pulled-up quals. Any inserted
+ * joins can get stacked onto either j->larg or j->rarg,
+ * depending on which rels they reference.
+ */
+ j->quals = pull_up_sublinks_qual_recurse(root,
+ j->quals,
+ &j->larg,
+ available_rels2,
+ &j->rarg,
+ child_rels);
+ /* Return NULL representing constant TRUE */
+ return NULL;
+ }
+ }
+ else if (sublink->subLinkType == EXISTS_SUBLINK)
+ {
+ if ((j = convert_EXISTS_sublink_to_join(root, sublink, false,
+ available_rels1)) != NULL)
+ {
+ /* Yes; insert the new join node into the join tree */
+ j->larg = *jtlink1;
+ *jtlink1 = (Node *) j;
+ /* Recursively process pulled-up jointree nodes */
+ j->rarg = pull_up_sublinks_jointree_recurse(root,
+ j->rarg,
+ &child_rels);
+
+ /*
+ * Now recursively process the pulled-up quals. Any inserted
+ * joins can get stacked onto either j->larg or j->rarg,
+ * depending on which rels they reference.
+ */
+ j->quals = pull_up_sublinks_qual_recurse(root,
+ j->quals,
+ &j->larg,
+ available_rels1,
+ &j->rarg,
+ child_rels);
+ /* Return NULL representing constant TRUE */
+ return NULL;
+ }
+ if (available_rels2 != NULL &&
+ (j = convert_EXISTS_sublink_to_join(root, sublink, false,
+ available_rels2)) != NULL)
+ {
+ /* Yes; insert the new join node into the join tree */
+ j->larg = *jtlink2;
+ *jtlink2 = (Node *) j;
+ /* Recursively process pulled-up jointree nodes */
+ j->rarg = pull_up_sublinks_jointree_recurse(root,
+ j->rarg,
+ &child_rels);
+
+ /*
+ * Now recursively process the pulled-up quals. Any inserted
+ * joins can get stacked onto either j->larg or j->rarg,
+ * depending on which rels they reference.
+ */
+ j->quals = pull_up_sublinks_qual_recurse(root,
+ j->quals,
+ &j->larg,
+ available_rels2,
+ &j->rarg,
+ child_rels);
+ /* Return NULL representing constant TRUE */
+ return NULL;
+ }
+ }
+ /* Else return it unmodified */
+ return node;
+ }
+ if (is_notclause(node))
+ {
+ /* If the immediate argument of NOT is EXISTS, try to convert */
+ SubLink *sublink = (SubLink *) get_notclausearg((Expr *) node);
+ JoinExpr *j;
+ Relids child_rels;
+
+ if (sublink && IsA(sublink, SubLink))
+ {
+ if (sublink->subLinkType == EXISTS_SUBLINK)
+ {
+ if ((j = convert_EXISTS_sublink_to_join(root, sublink, true,
+ available_rels1)) != NULL)
+ {
+ /* Yes; insert the new join node into the join tree */
+ j->larg = *jtlink1;
+ *jtlink1 = (Node *) j;
+ /* Recursively process pulled-up jointree nodes */
+ j->rarg = pull_up_sublinks_jointree_recurse(root,
+ j->rarg,
+ &child_rels);
+
+ /*
+ * Now recursively process the pulled-up quals. Because
+ * we are underneath a NOT, we can't pull up sublinks that
+ * reference the left-hand stuff, but it's still okay to
+ * pull up sublinks referencing j->rarg.
+ */
+ j->quals = pull_up_sublinks_qual_recurse(root,
+ j->quals,
+ &j->rarg,
+ child_rels,
+ NULL, NULL);
+ /* Return NULL representing constant TRUE */
+ return NULL;
+ }
+ if (available_rels2 != NULL &&
+ (j = convert_EXISTS_sublink_to_join(root, sublink, true,
+ available_rels2)) != NULL)
+ {
+ /* Yes; insert the new join node into the join tree */
+ j->larg = *jtlink2;
+ *jtlink2 = (Node *) j;
+ /* Recursively process pulled-up jointree nodes */
+ j->rarg = pull_up_sublinks_jointree_recurse(root,
+ j->rarg,
+ &child_rels);
+
+ /*
+ * Now recursively process the pulled-up quals. Because
+ * we are underneath a NOT, we can't pull up sublinks that
+ * reference the left-hand stuff, but it's still okay to
+ * pull up sublinks referencing j->rarg.
+ */
+ j->quals = pull_up_sublinks_qual_recurse(root,
+ j->quals,
+ &j->rarg,
+ child_rels,
+ NULL, NULL);
+ /* Return NULL representing constant TRUE */
+ return NULL;
+ }
+ }
+ }
+ /* Else return it unmodified */
+ return node;
+ }
+ if (is_andclause(node))
+ {
+ /* Recurse into AND clause */
+ List *newclauses = NIL;
+ ListCell *l;
+
+ foreach(l, ((BoolExpr *) node)->args)
+ {
+ Node *oldclause = (Node *) lfirst(l);
+ Node *newclause;
+
+ newclause = pull_up_sublinks_qual_recurse(root,
+ oldclause,
+ jtlink1,
+ available_rels1,
+ jtlink2,
+ available_rels2);
+ if (newclause)
+ newclauses = lappend(newclauses, newclause);
+ }
+ /* We might have got back fewer clauses than we started with */
+ if (newclauses == NIL)
+ return NULL;
+ else if (list_length(newclauses) == 1)
+ return (Node *) linitial(newclauses);
+ else
+ return (Node *) make_andclause(newclauses);
+ }
+ /* Stop if not an AND */
+ return node;
+}
+
+/*
+ * preprocess_function_rtes
+ * Constant-simplify any FUNCTION RTEs in the FROM clause, and then
+ * attempt to "inline" any that are set-returning functions.
+ *
+ * If an RTE_FUNCTION rtable entry invokes a set-returning function that
+ * contains just a simple SELECT, we can convert the rtable entry to an
+ * RTE_SUBQUERY entry exposing the SELECT directly. This is especially
+ * useful if the subquery can then be "pulled up" for further optimization,
+ * but we do it even if not, to reduce executor overhead.
+ *
+ * This has to be done before we have started to do any optimization of
+ * subqueries, else any such steps wouldn't get applied to subqueries
+ * obtained via inlining. However, we do it after pull_up_sublinks
+ * so that we can inline any functions used in SubLink subselects.
+ *
+ * The reason for applying const-simplification at this stage is that
+ * (a) we'd need to do it anyway to inline a SRF, and (b) by doing it now,
+ * we can be sure that pull_up_constant_function() will see constants
+ * if there are constants to be seen. This approach also guarantees
+ * that every FUNCTION RTE has been const-simplified, allowing planner.c's
+ * preprocess_expression() to skip doing it again.
+ *
+ * Like most of the planner, this feels free to scribble on its input data
+ * structure.
+ */
+void
+preprocess_function_rtes(PlannerInfo *root)
+{
+ ListCell *rt;
+
+ foreach(rt, root->parse->rtable)
+ {
+ RangeTblEntry *rte = (RangeTblEntry *) lfirst(rt);
+
+ if (rte->rtekind == RTE_FUNCTION)
+ {
+ Query *funcquery;
+
+ /* Apply const-simplification */
+ rte->functions = (List *)
+ eval_const_expressions(root, (Node *) rte->functions);
+
+ /* Check safety of expansion, and expand if possible */
+ funcquery = inline_set_returning_function(root, rte);
+ if (funcquery)
+ {
+ /* Successful expansion, convert the RTE to a subquery */
+ rte->rtekind = RTE_SUBQUERY;
+ rte->subquery = funcquery;
+ rte->security_barrier = false;
+ /* Clear fields that should not be set in a subquery RTE */
+ rte->functions = NIL;
+ rte->funcordinality = false;
+ }
+ }
+ }
+}
+
+/*
+ * pull_up_subqueries
+ * Look for subqueries in the rangetable that can be pulled up into
+ * the parent query. If the subquery has no special features like
+ * grouping/aggregation then we can merge it into the parent's jointree.
+ * Also, subqueries that are simple UNION ALL structures can be
+ * converted into "append relations".
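+ *
+ * For example, a query such as
+ *     SELECT ... FROM (SELECT a, b FROM t WHERE c > 0) ss WHERE ss.a > 1
+ * is, in effect, flattened into
+ *     SELECT ... FROM t WHERE c > 0 AND t.a > 1
+ * with references to the subquery's outputs replaced by the underlying
+ * expressions.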
+ */
+void
+pull_up_subqueries(PlannerInfo *root)
+{
+ /* Top level of jointree must always be a FromExpr */
+ Assert(IsA(root->parse->jointree, FromExpr));
+ /* Recursion starts with no containing join nor appendrel */
+ root->parse->jointree = (FromExpr *)
+ pull_up_subqueries_recurse(root, (Node *) root->parse->jointree,
+ NULL, NULL, NULL);
+ /* We should still have a FromExpr */
+ Assert(IsA(root->parse->jointree, FromExpr));
+}
+
+/*
+ * pull_up_subqueries_recurse
+ * Recursive guts of pull_up_subqueries.
+ *
+ * This recursively processes the jointree and returns a modified jointree.
+ *
+ * If this jointree node is within either side of an outer join, then
+ * lowest_outer_join references the lowest such JoinExpr node; otherwise
+ * it is NULL. We use this to constrain the effects of LATERAL subqueries.
+ *
+ * If this jointree node is within the nullable side of an outer join, then
+ * lowest_nulling_outer_join references the lowest such JoinExpr node;
+ * otherwise it is NULL. This forces use of the PlaceHolderVar mechanism for
+ * references to non-nullable targetlist items, but only for references above
+ * that join.
+ *
+ * If we are looking at a member subquery of an append relation,
+ * containing_appendrel describes that relation; else it is NULL.
+ * This forces use of the PlaceHolderVar mechanism for all non-Var targetlist
+ * items, and puts some additional restrictions on what can be pulled up.
+ *
+ * A tricky aspect of this code is that if we pull up a subquery we have
+ * to replace Vars that reference the subquery's outputs throughout the
+ * parent query, including quals attached to jointree nodes above the one
+ * we are currently processing! We handle this by being careful to maintain
+ * validity of the jointree structure while recursing, in the following sense:
+ * whenever we recurse, all qual expressions in the tree must be reachable
+ * from the top level, in case the recursive call needs to modify them.
+ *
+ * Notice also that we can't turn pullup_replace_vars loose on the whole
+ * jointree, because it'd return a mutated copy of the tree; we have to
+ * invoke it just on the quals, instead. This behavior is what makes it
+ * reasonable to pass lowest_outer_join and lowest_nulling_outer_join as
+ * pointers rather than some more-indirect way of identifying the lowest
+ * OJs. Likewise, we don't replace append_rel_list members but only their
+ * substructure, so the containing_appendrel reference is safe to use.
+ */
+static Node *
+pull_up_subqueries_recurse(PlannerInfo *root, Node *jtnode,
+ JoinExpr *lowest_outer_join,
+ JoinExpr *lowest_nulling_outer_join,
+ AppendRelInfo *containing_appendrel)
+{
+ /* Since this function recurses, it could be driven to stack overflow. */
+ check_stack_depth();
+ /* Also, since it's a bit expensive, let's check for query cancel. */
+ CHECK_FOR_INTERRUPTS();
+
+ Assert(jtnode != NULL);
+ if (IsA(jtnode, RangeTblRef))
+ {
+ int varno = ((RangeTblRef *) jtnode)->rtindex;
+ RangeTblEntry *rte = rt_fetch(varno, root->parse->rtable);
+
+ /*
+ * Is this a subquery RTE, and if so, is the subquery simple enough to
+ * pull up?
+ *
+ * If we are looking at an append-relation member, we can't pull it up
+ * unless is_safe_append_member says so.
+ */
+ if (rte->rtekind == RTE_SUBQUERY &&
+ is_simple_subquery(root, rte->subquery, rte, lowest_outer_join) &&
+ (containing_appendrel == NULL ||
+ is_safe_append_member(rte->subquery)))
+ return pull_up_simple_subquery(root, jtnode, rte,
+ lowest_outer_join,
+ lowest_nulling_outer_join,
+ containing_appendrel);
+
+ /*
+ * Alternatively, is it a simple UNION ALL subquery? If so, flatten
+ * into an "append relation".
+ *
+ * It's safe to do this regardless of whether this query is itself an
+ * appendrel member. (If you're thinking we should try to flatten the
+ * two levels of appendrel together, you're right; but we handle that
+ * in set_append_rel_pathlist, not here.)
+ */
+ if (rte->rtekind == RTE_SUBQUERY &&
+ is_simple_union_all(rte->subquery))
+ return pull_up_simple_union_all(root, jtnode, rte);
+
+ /*
+ * Or perhaps it's a simple VALUES RTE?
+ *
+ * We don't allow VALUES pullup below an outer join nor into an
+ * appendrel (such cases are impossible anyway at the moment).
+ */
+ if (rte->rtekind == RTE_VALUES &&
+ lowest_outer_join == NULL &&
+ containing_appendrel == NULL &&
+ is_simple_values(root, rte))
+ return pull_up_simple_values(root, jtnode, rte);
+
+ /*
+ * Or perhaps it's a FUNCTION RTE that we could inline?
+ */
+ if (rte->rtekind == RTE_FUNCTION)
+ return pull_up_constant_function(root, jtnode, rte,
+ lowest_nulling_outer_join,
+ containing_appendrel);
+
+ /* Otherwise, do nothing at this node. */
+ }
+ else if (IsA(jtnode, FromExpr))
+ {
+ FromExpr *f = (FromExpr *) jtnode;
+ ListCell *l;
+
+ Assert(containing_appendrel == NULL);
+ /* Recursively transform all the child nodes */
+ foreach(l, f->fromlist)
+ {
+ lfirst(l) = pull_up_subqueries_recurse(root, lfirst(l),
+ lowest_outer_join,
+ lowest_nulling_outer_join,
+ NULL);
+ }
+ }
+ else if (IsA(jtnode, JoinExpr))
+ {
+ JoinExpr *j = (JoinExpr *) jtnode;
+
+ Assert(containing_appendrel == NULL);
+ /* Recurse, being careful to tell myself when inside outer join */
+ switch (j->jointype)
+ {
+ case JOIN_INNER:
+ j->larg = pull_up_subqueries_recurse(root, j->larg,
+ lowest_outer_join,
+ lowest_nulling_outer_join,
+ NULL);
+ j->rarg = pull_up_subqueries_recurse(root, j->rarg,
+ lowest_outer_join,
+ lowest_nulling_outer_join,
+ NULL);
+ break;
+ case JOIN_LEFT:
+ case JOIN_SEMI:
+ case JOIN_ANTI:
+ j->larg = pull_up_subqueries_recurse(root, j->larg,
+ j,
+ lowest_nulling_outer_join,
+ NULL);
+ j->rarg = pull_up_subqueries_recurse(root, j->rarg,
+ j,
+ j,
+ NULL);
+ break;
+ case JOIN_FULL:
+ j->larg = pull_up_subqueries_recurse(root, j->larg,
+ j,
+ j,
+ NULL);
+ j->rarg = pull_up_subqueries_recurse(root, j->rarg,
+ j,
+ j,
+ NULL);
+ break;
+ case JOIN_RIGHT:
+ j->larg = pull_up_subqueries_recurse(root, j->larg,
+ j,
+ j,
+ NULL);
+ j->rarg = pull_up_subqueries_recurse(root, j->rarg,
+ j,
+ lowest_nulling_outer_join,
+ NULL);
+ break;
+ default:
+ elog(ERROR, "unrecognized join type: %d",
+ (int) j->jointype);
+ break;
+ }
+ }
+ else
+ elog(ERROR, "unrecognized node type: %d",
+ (int) nodeTag(jtnode));
+ return jtnode;
+}
+
+/*
+ * pull_up_simple_subquery
+ * Attempt to pull up a single simple subquery.
+ *
+ * jtnode is a RangeTblRef that has been tentatively identified as a simple
+ * subquery by pull_up_subqueries. We return the replacement jointree node,
+ * or jtnode itself if we determine that the subquery can't be pulled up
+ * after all.
+ *
+ * rte is the RangeTblEntry referenced by jtnode. Remaining parameters are
+ * as for pull_up_subqueries_recurse.
+ */
+static Node *
+pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte,
+ JoinExpr *lowest_outer_join,
+ JoinExpr *lowest_nulling_outer_join,
+ AppendRelInfo *containing_appendrel)
+{
+ Query *parse = root->parse;
+ int varno = ((RangeTblRef *) jtnode)->rtindex;
+ Query *subquery;
+ PlannerInfo *subroot;
+ int rtoffset;
+ pullup_replace_vars_context rvcontext;
+ ListCell *lc;
+
+ /*
+ * Make a modifiable copy of the subquery to hack on, so that the RTE will
+ * be left unchanged in case we decide below that we can't pull it up
+ * after all.
+ */
+ subquery = copyObject(rte->subquery);
+
+ /*
+ * Create a PlannerInfo data structure for this subquery.
+ *
+ * NOTE: the next few steps should match the first processing in
+ * subquery_planner(). Can we refactor to avoid code duplication, or
+ * would that just make things uglier?
+ */
+ subroot = makeNode(PlannerInfo);
+ subroot->parse = subquery;
+ subroot->glob = root->glob;
+ subroot->query_level = root->query_level;
+ subroot->parent_root = root->parent_root;
+ subroot->plan_params = NIL;
+ subroot->outer_params = NULL;
+ subroot->planner_cxt = CurrentMemoryContext;
+ subroot->init_plans = NIL;
+ subroot->cte_plan_ids = NIL;
+ subroot->multiexpr_params = NIL;
+ subroot->eq_classes = NIL;
+ subroot->ec_merging_done = false;
+ subroot->all_result_relids = NULL;
+ subroot->leaf_result_relids = NULL;
+ subroot->append_rel_list = NIL;
+ subroot->row_identity_vars = NIL;
+ subroot->rowMarks = NIL;
+ memset(subroot->upper_rels, 0, sizeof(subroot->upper_rels));
+ memset(subroot->upper_targets, 0, sizeof(subroot->upper_targets));
+ subroot->processed_tlist = NIL;
+ subroot->update_colnos = NIL;
+ subroot->grouping_map = NULL;
+ subroot->minmax_aggs = NIL;
+ subroot->qual_security_level = 0;
+ subroot->hasRecursion = false;
+ subroot->wt_param_id = -1;
+ subroot->non_recursive_path = NULL;
+
+ /* No CTEs to worry about */
+ Assert(subquery->cteList == NIL);
+
+ /*
+ * If the FROM clause is empty, replace it with a dummy RTE_RESULT RTE, so
+ * that we don't need so many special cases to deal with that situation.
+ */
+ replace_empty_jointree(subquery);
+
+ /*
+ * Pull up any SubLinks within the subquery's quals, so that we don't
+ * leave unoptimized SubLinks behind.
+ */
+ if (subquery->hasSubLinks)
+ pull_up_sublinks(subroot);
+
+ /*
+ * Similarly, preprocess its function RTEs to inline any set-returning
+ * functions in its rangetable.
+ */
+ preprocess_function_rtes(subroot);
+
+ /*
+ * Recursively pull up the subquery's subqueries, so that
+ * pull_up_subqueries' processing is complete for its jointree and
+ * rangetable.
+ *
+ * Note: it's okay that the subquery's recursion starts with NULL for
+ * containing-join info, even if we are within an outer join in the upper
+ * query; the lower query starts with a clean slate for outer-join
+ * semantics. Likewise, we needn't pass down appendrel state.
+ */
+ pull_up_subqueries(subroot);
+
+ /*
+ * Now we must recheck whether the subquery is still simple enough to pull
+ * up. If not, abandon processing it.
+ *
+ * We don't really need to recheck all the conditions involved, but it's
+ * easier just to keep this "if" looking the same as the one in
+ * pull_up_subqueries_recurse.
+ */
+ if (is_simple_subquery(root, subquery, rte, lowest_outer_join) &&
+ (containing_appendrel == NULL || is_safe_append_member(subquery)))
+ {
+ /* good to go */
+ }
+ else
+ {
+ /*
+ * Give up, return unmodified RangeTblRef.
+ *
+ * Note: The work we just did will be redone when the subquery gets
+ * planned on its own. Perhaps we could avoid that by storing the
+ * modified subquery back into the rangetable, but that doesn't seem
+ * worth the risk at present.
+ */
+ return jtnode;
+ }
+
+ /*
+ * We must flatten any join alias Vars in the subquery's targetlist,
+ * because pulling up the subquery's subqueries might have changed their
+ * expansions into arbitrary expressions, which could affect
+ * pullup_replace_vars' decisions about whether PlaceHolderVar wrappers
+ * are needed for tlist entries. (Likely it'd be better to do
+ * flatten_join_alias_vars on the whole query tree at some earlier stage,
+ * maybe even in the rewriter; but for now let's just fix this case here.)
+ */
+ subquery->targetList = (List *)
+ flatten_join_alias_vars(subroot->parse, (Node *) subquery->targetList);
+
+ /*
+ * Adjust level-0 varnos in subquery so that we can append its rangetable
+ * to upper query's. We have to fix the subquery's append_rel_list as
+ * well.
+ */
+ rtoffset = list_length(parse->rtable);
+ OffsetVarNodes((Node *) subquery, rtoffset, 0);
+ OffsetVarNodes((Node *) subroot->append_rel_list, rtoffset, 0);
+
+ /*
+ * Upper-level vars in subquery are now one level closer to their parent
+ * than before.
+ */
+ IncrementVarSublevelsUp((Node *) subquery, -1, 1);
+ IncrementVarSublevelsUp((Node *) subroot->append_rel_list, -1, 1);
+
+ /*
+ * The subquery's targetlist items are now in the appropriate form to
+ * insert into the top query, except that we may need to wrap them in
+ * PlaceHolderVars. Set up required context data for pullup_replace_vars.
+ */
+ rvcontext.root = root;
+ rvcontext.targetlist = subquery->targetList;
+ rvcontext.target_rte = rte;
+ if (rte->lateral)
+ rvcontext.relids = get_relids_in_jointree((Node *) subquery->jointree,
+ true);
+ else /* won't need relids */
+ rvcontext.relids = NULL;
+ rvcontext.outer_hasSubLinks = &parse->hasSubLinks;
+ rvcontext.varno = varno;
+ /* these flags will be set below, if needed */
+ rvcontext.need_phvs = false;
+ rvcontext.wrap_non_vars = false;
+ /* initialize cache array with indexes 0 .. length(tlist) */
+ rvcontext.rv_cache = palloc0((list_length(subquery->targetList) + 1) *
+ sizeof(Node *));
+
+ /*
+ * If we are under an outer join then non-nullable items and lateral
+ * references may have to be turned into PlaceHolderVars.
+ */
+ if (lowest_nulling_outer_join != NULL)
+ rvcontext.need_phvs = true;
+
+ /*
+ * If we are dealing with an appendrel member then anything that's not a
+ * simple Var has to be turned into a PlaceHolderVar. We force this to
+ * ensure that what we pull up doesn't get merged into a surrounding
+ * expression during later processing and then fail to match the
+ * expression actually available from the appendrel.
+ */
+ if (containing_appendrel != NULL)
+ {
+ rvcontext.need_phvs = true;
+ rvcontext.wrap_non_vars = true;
+ }
+
+ /*
+ * If the parent query uses grouping sets, we need a PlaceHolderVar for
+ * anything that's not a simple Var. Again, this ensures that expressions
+ * retain their separate identity so that they will match grouping set
+ * columns when appropriate. (It'd be sufficient to wrap values used in
+ * grouping set columns, and do so only in non-aggregated portions of the
+ * tlist and havingQual, but that would require a lot of infrastructure
+ * that pullup_replace_vars hasn't currently got.)
+ */
+ if (parse->groupingSets)
+ {
+ rvcontext.need_phvs = true;
+ rvcontext.wrap_non_vars = true;
+ }
+
+ /*
+ * Replace all of the top query's references to the subquery's outputs
+ * with copies of the adjusted subtlist items, being careful not to
+ * replace any of the jointree structure.
+ */
+ perform_pullup_replace_vars(root, &rvcontext,
+ lowest_nulling_outer_join,
+ containing_appendrel);
+
+ /*
+ * If the subquery had a LATERAL marker, propagate that to any of its
+ * child RTEs that could possibly now contain lateral cross-references.
+ * The children might or might not contain any actual lateral
+ * cross-references, but we have to mark the pulled-up child RTEs so that
+ * later planner stages will check for such.
+ */
+ if (rte->lateral)
+ {
+ foreach(lc, subquery->rtable)
+ {
+ RangeTblEntry *child_rte = (RangeTblEntry *) lfirst(lc);
+
+ switch (child_rte->rtekind)
+ {
+ case RTE_RELATION:
+ if (child_rte->tablesample)
+ child_rte->lateral = true;
+ break;
+ case RTE_SUBQUERY:
+ case RTE_FUNCTION:
+ case RTE_VALUES:
+ case RTE_TABLEFUNC:
+ child_rte->lateral = true;
+ break;
+ case RTE_JOIN:
+ case RTE_CTE:
+ case RTE_NAMEDTUPLESTORE:
+ case RTE_RESULT:
+ /* these can't contain any lateral references */
+ break;
+ }
+ }
+ }
+
+ /*
+ * Now append the adjusted rtable entries to upper query. (We hold off
+ * until after fixing the upper rtable entries; no point in running that
+ * code on the subquery ones too.)
+ */
+ parse->rtable = list_concat(parse->rtable, subquery->rtable);
+
+ /*
+ * Pull up any FOR UPDATE/SHARE markers, too. (OffsetVarNodes already
+ * adjusted the marker rtindexes, so just concat the lists.)
+ */
+ parse->rowMarks = list_concat(parse->rowMarks, subquery->rowMarks);
+
+ /*
+ * We also have to fix the relid sets of any PlaceHolderVar nodes in the
+ * parent query. (This could perhaps be done by pullup_replace_vars(),
+ * but it seems cleaner to use two passes.) Note in particular that any
+ * PlaceHolderVar nodes just created by pullup_replace_vars() will be
+ * adjusted, so having created them with the subquery's varno is correct.
+ *
+ * Likewise, relids appearing in AppendRelInfo nodes have to be fixed. We
+ * already checked that this won't require introducing multiple subrelids
+ * into the single-slot AppendRelInfo structs.
+ */
+ if (parse->hasSubLinks || root->glob->lastPHId != 0 ||
+ root->append_rel_list)
+ {
+ Relids subrelids;
+
+ subrelids = get_relids_in_jointree((Node *) subquery->jointree, false);
+ substitute_phv_relids((Node *) parse, varno, subrelids);
+ fix_append_rel_relids(root->append_rel_list, varno, subrelids);
+ }
+
+ /*
+ * And now add subquery's AppendRelInfos to our list.
+ */
+ root->append_rel_list = list_concat(root->append_rel_list,
+ subroot->append_rel_list);
+
+ /*
+ * We don't have to do the equivalent bookkeeping for outer-join info,
+ * because that hasn't been set up yet. placeholder_list likewise.
+ */
+ Assert(root->join_info_list == NIL);
+ Assert(subroot->join_info_list == NIL);
+ Assert(root->placeholder_list == NIL);
+ Assert(subroot->placeholder_list == NIL);
+
+ /*
+ * We no longer need the RTE's copy of the subquery's query tree. Getting
+ * rid of it saves nothing in particular so far as this level of query is
+ * concerned; but if this query level is in turn pulled up into a parent,
+ * we'd waste cycles copying the now-unused query tree.
+ */
+ rte->subquery = NULL;
+
+ /*
+ * Miscellaneous housekeeping.
+ *
+ * Although replace_rte_variables() faithfully updated parse->hasSubLinks
+ * if it copied any SubLinks out of the subquery's targetlist, we still
+ * could have SubLinks added to the query in the expressions of FUNCTION
+ * and VALUES RTEs copied up from the subquery. So it's necessary to copy
+ * subquery->hasSubLinks anyway. Perhaps this can be improved someday.
+ */
+ parse->hasSubLinks |= subquery->hasSubLinks;
+
+ /* If subquery had any RLS conditions, now main query does too */
+ parse->hasRowSecurity |= subquery->hasRowSecurity;
+
+ /*
+ * subquery won't be pulled up if it hasAggs, hasWindowFuncs, or
+ * hasTargetSRFs, so no work needed on those flags
+ */
+
+ /*
+ * Return the adjusted subquery jointree to replace the RangeTblRef entry
+ * in parent's jointree; or, if the FromExpr is degenerate, just return
+ * its single member.
+ */
+ Assert(IsA(subquery->jointree, FromExpr));
+ Assert(subquery->jointree->fromlist != NIL);
+ if (subquery->jointree->quals == NULL &&
+ list_length(subquery->jointree->fromlist) == 1)
+ return (Node *) linitial(subquery->jointree->fromlist);
+
+ return (Node *) subquery->jointree;
+}
+
+/*
+ * pull_up_simple_union_all
+ * Pull up a single simple UNION ALL subquery.
+ *
+ * jtnode is a RangeTblRef that has been identified as a simple UNION ALL
+ * subquery by pull_up_subqueries. We pull up the leaf subqueries and
+ * build an "append relation" for the union set. The result value is just
+ * jtnode, since we don't actually need to change the query jointree.
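+ *
+ * For example, given "SELECT * FROM (SELECT a FROM t1 UNION ALL SELECT a
+ * FROM t2) ss", ss becomes an append-relation parent whose children are
+ * the two leaf subqueries, which are then themselves candidates for
+ * pullup.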
+ */
+static Node *
+pull_up_simple_union_all(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte)
+{
+ int varno = ((RangeTblRef *) jtnode)->rtindex;
+ Query *subquery = rte->subquery;
+ int rtoffset = list_length(root->parse->rtable);
+ List *rtable;
+
+ /*
+ * Make a modifiable copy of the subquery's rtable, so we can adjust
+ * upper-level Vars in it. There are no such Vars in the setOperations
+ * tree proper, so fixing the rtable should be sufficient.
+ */
+ rtable = copyObject(subquery->rtable);
+
+ /*
+ * Upper-level vars in subquery are now one level closer to their parent
+ * than before. We don't have to worry about offsetting varnos, though,
+ * because the UNION leaf queries can't cross-reference each other.
+ */
+ IncrementVarSublevelsUp_rtable(rtable, -1, 1);
+
+ /*
+ * If the UNION ALL subquery had a LATERAL marker, propagate that to all
+ * its children. The individual children might or might not contain any
+ * actual lateral cross-references, but we have to mark the pulled-up
+ * child RTEs so that later planner stages will check for such.
+ */
+ if (rte->lateral)
+ {
+ ListCell *rt;
+
+ foreach(rt, rtable)
+ {
+ RangeTblEntry *child_rte = (RangeTblEntry *) lfirst(rt);
+
+ Assert(child_rte->rtekind == RTE_SUBQUERY);
+ child_rte->lateral = true;
+ }
+ }
+
+ /*
+ * Append child RTEs to parent rtable.
+ */
+ root->parse->rtable = list_concat(root->parse->rtable, rtable);
+
+ /*
+ * Recursively scan the subquery's setOperations tree and add
+ * AppendRelInfo nodes for leaf subqueries to the parent's
+ * append_rel_list. Also apply pull_up_subqueries to the leaf subqueries.
+ */
+ Assert(subquery->setOperations);
+ pull_up_union_leaf_queries(subquery->setOperations, root, varno, subquery,
+ rtoffset);
+
+ /*
+ * Mark the parent as an append relation.
+ */
+ rte->inh = true;
+
+ return jtnode;
+}
+
+/*
+ * pull_up_union_leaf_queries -- recursive guts of pull_up_simple_union_all
+ *
+ * Build an AppendRelInfo for each leaf query in the setop tree, and then
+ * apply pull_up_subqueries to the leaf query.
+ *
+ * Note that setOpQuery is the Query containing the setOp node, whose tlist
+ * contains references to all the setop output columns. When called from
+ * pull_up_simple_union_all, this is *not* the same as root->parse, which is
+ * the parent Query we are pulling up into.
+ *
+ * parentRTindex is the appendrel parent's index in root->parse->rtable.
+ *
+ * The child RTEs have already been copied to the parent. childRToffset
+ * tells us where in the parent's range table they were copied. When called
+ * from flatten_simple_union_all, childRToffset is 0 since the child RTEs
+ * were already in root->parse->rtable and no RT index adjustment is needed.
+ */
+static void
+pull_up_union_leaf_queries(Node *setOp, PlannerInfo *root, int parentRTindex,
+ Query *setOpQuery, int childRToffset)
+{
+ if (IsA(setOp, RangeTblRef))
+ {
+ RangeTblRef *rtr = (RangeTblRef *) setOp;
+ int childRTindex;
+ AppendRelInfo *appinfo;
+
+ /*
+ * Calculate the index in the parent's range table
+ */
+ childRTindex = childRToffset + rtr->rtindex;
+
+ /*
+ * Build a suitable AppendRelInfo, and attach to parent's list.
+ */
+ appinfo = makeNode(AppendRelInfo);
+ appinfo->parent_relid = parentRTindex;
+ appinfo->child_relid = childRTindex;
+ appinfo->parent_reltype = InvalidOid;
+ appinfo->child_reltype = InvalidOid;
+ make_setop_translation_list(setOpQuery, childRTindex, appinfo);
+ appinfo->parent_reloid = InvalidOid;
+ root->append_rel_list = lappend(root->append_rel_list, appinfo);
+
+ /*
+ * Recursively apply pull_up_subqueries to the new child RTE. (We
+ * must build the AppendRelInfo first, because this will modify it.)
+ * Note that we can pass NULL for containing-join info even if we're
+ * actually under an outer join, because the child's expressions
+ * aren't going to propagate up to the join. Also, we ignore the
+ * possibility that pull_up_subqueries_recurse() returns a different
+ * jointree node than what we pass it; if it does, the important thing
+ * is that it replaced the child relid in the AppendRelInfo node.
+ */
+ rtr = makeNode(RangeTblRef);
+ rtr->rtindex = childRTindex;
+ (void) pull_up_subqueries_recurse(root, (Node *) rtr,
+ NULL, NULL, appinfo);
+ }
+ else if (IsA(setOp, SetOperationStmt))
+ {
+ SetOperationStmt *op = (SetOperationStmt *) setOp;
+
+ /* Recurse to reach leaf queries */
+ pull_up_union_leaf_queries(op->larg, root, parentRTindex, setOpQuery,
+ childRToffset);
+ pull_up_union_leaf_queries(op->rarg, root, parentRTindex, setOpQuery,
+ childRToffset);
+ }
+ else
+ {
+ elog(ERROR, "unrecognized node type: %d",
+ (int) nodeTag(setOp));
+ }
+}
+
+/*
+ * make_setop_translation_list
+ * Build the list of translations from parent Vars to child Vars for
+ * a UNION ALL member. (At this point it's just a simple list of
+ * referencing Vars, but if we succeed in pulling up the member
+ * subquery, the Vars will get replaced by pulled-up expressions.)
+ * Also create the rather trivial reverse-translation array.
+ */
+static void
+make_setop_translation_list(Query *query, int newvarno,
+ AppendRelInfo *appinfo)
+{
+ List *vars = NIL;
+ AttrNumber *pcolnos;
+ ListCell *l;
+
+ /* Initialize reverse-translation array with all entries zero */
+ /* (entries for resjunk columns will stay that way) */
+ appinfo->num_child_cols = list_length(query->targetList);
+ appinfo->parent_colnos = pcolnos =
+ (AttrNumber *) palloc0(appinfo->num_child_cols * sizeof(AttrNumber));
+
+ foreach(l, query->targetList)
+ {
+ TargetEntry *tle = (TargetEntry *) lfirst(l);
+
+ if (tle->resjunk)
+ continue;
+
+ vars = lappend(vars, makeVarFromTargetEntry(newvarno, tle));
+ pcolnos[tle->resno - 1] = tle->resno;
+ }
+
+ appinfo->translated_vars = vars;
+}
+
+/*
+ * is_simple_subquery
+ * Check a subquery in the range table to see if it's simple enough
+ * to pull up into the parent query.
+ *
+ * rte is the RTE_SUBQUERY RangeTblEntry that contained the subquery.
+ * (Note subquery is not necessarily equal to rte->subquery; it could be a
+ * processed copy of that.)
+ * lowest_outer_join is the lowest outer join above the subquery, or NULL.
+ */
+static bool
+is_simple_subquery(PlannerInfo *root, Query *subquery, RangeTblEntry *rte,
+ JoinExpr *lowest_outer_join)
+{
+ /*
+ * Let's just make sure it's a valid subselect ...
+ */
+ if (!IsA(subquery, Query) ||
+ subquery->commandType != CMD_SELECT)
+ elog(ERROR, "subquery is bogus");
+
+ /*
+ * Can't currently pull up a query with setops (unless it's simple UNION
+ * ALL, which is handled by a different code path). Maybe after querytree
+ * redesign...
+ */
+ if (subquery->setOperations)
+ return false;
+
+ /*
+ * Can't pull up a subquery involving grouping, aggregation, SRFs,
+ * sorting, limiting, or WITH. (XXX WITH could possibly be allowed later)
+ *
+ * We also don't pull up a subquery that has explicit FOR UPDATE/SHARE
+ * clauses, because pullup would cause the locking to occur semantically
+ * higher than it should. Implicit FOR UPDATE/SHARE is okay because in
+ * that case the locking was originally declared in the upper query
+ * anyway.
+ */
+ if (subquery->hasAggs ||
+ subquery->hasWindowFuncs ||
+ subquery->hasTargetSRFs ||
+ subquery->groupClause ||
+ subquery->groupingSets ||
+ subquery->havingQual ||
+ subquery->sortClause ||
+ subquery->distinctClause ||
+ subquery->limitOffset ||
+ subquery->limitCount ||
+ subquery->hasForUpdate ||
+ subquery->cteList)
+ return false;
+
+ /*
+ * Don't pull up if the RTE represents a security-barrier view; we
+ * couldn't prevent information leakage once the RTE's Vars are scattered
+ * about in the upper query.
+ */
+ if (rte->security_barrier)
+ return false;
+
+ /*
+ * If the subquery is LATERAL, check for pullup restrictions from that.
+ */
+ if (rte->lateral)
+ {
+ bool restricted;
+ Relids safe_upper_varnos;
+
+ /*
+ * The subquery's WHERE and JOIN/ON quals mustn't contain any lateral
+ * references to rels outside a higher outer join (including the case
+ * where the outer join is within the subquery itself). In such a
+ * case, pulling up would result in a situation where we need to
+ * postpone quals from below an outer join to above it, which is
+ * probably completely wrong and in any case is a complication that
+ * doesn't seem worth addressing at the moment.
+ */
+ if (lowest_outer_join != NULL)
+ {
+ restricted = true;
+ safe_upper_varnos = get_relids_in_jointree((Node *) lowest_outer_join,
+ true);
+ }
+ else
+ {
+ restricted = false;
+ safe_upper_varnos = NULL; /* doesn't matter */
+ }
+
+ if (jointree_contains_lateral_outer_refs(root,
+ (Node *) subquery->jointree,
+ restricted, safe_upper_varnos))
+ return false;
+
+ /*
+ * If there's an outer join above the LATERAL subquery, also disallow
+ * pullup if the subquery's targetlist has any references to rels
+ * outside the outer join, since these might get pulled into quals
+ * above the subquery (but in or below the outer join) and then lead
+ * to qual-postponement issues similar to the case checked for above.
+ * (We wouldn't need to prevent pullup if no such references appear in
+ * outer-query quals, but we don't have enough info here to check
+ * that. Also, maybe this restriction could be removed if we forced
+ * such refs to be wrapped in PlaceHolderVars, even when they're below
+ * the nearest outer join? But it's a pretty hokey usage, so not
+ * clear this is worth sweating over.)
+ */
+ if (lowest_outer_join != NULL)
+ {
+ Relids lvarnos = pull_varnos_of_level(root,
+ (Node *) subquery->targetList,
+ 1);
+
+ if (!bms_is_subset(lvarnos, safe_upper_varnos))
+ return false;
+ }
+ }
+
+ /*
+ * Don't pull up a subquery that has any volatile functions in its
+ * targetlist. Otherwise we might introduce multiple evaluations of these
+ * functions, if they get copied to multiple places in the upper query,
+ * leading to surprising results. (Note: the PlaceHolderVar mechanism
+ * doesn't quite guarantee single evaluation; else we could pull up anyway
+ * and just wrap such items in PlaceHolderVars ...)
+ */
+ if (contain_volatile_functions((Node *) subquery->targetList))
+ return false;
+
+ return true;
+}
+
+/*
+ * pull_up_simple_values
+ * Pull up a single simple VALUES RTE.
+ *
+ * jtnode is a RangeTblRef that has been identified as a simple VALUES RTE
+ * by pull_up_subqueries. We always return a RangeTblRef representing a
+ * RESULT RTE to replace it (all failure cases should have been detected by
+ * is_simple_values()). Actually, what we return is just jtnode, because
+ * we replace the VALUES RTE in the rangetable with the RESULT RTE.
+ *
+ * rte is the RangeTblEntry referenced by jtnode. Because of the limited
+ * possible usage of VALUES RTEs, we do not need the remaining parameters
+ * of pull_up_subqueries_recurse.
+ */
+static Node *
+pull_up_simple_values(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte)
+{
+ Query *parse = root->parse;
+ int varno = ((RangeTblRef *) jtnode)->rtindex;
+ List *values_list;
+ List *tlist;
+ AttrNumber attrno;
+ pullup_replace_vars_context rvcontext;
+ ListCell *lc;
+
+ Assert(rte->rtekind == RTE_VALUES);
+ Assert(list_length(rte->values_lists) == 1);
+
+ /*
+ * Need a modifiable copy of the VALUES list to hack on, just in case it's
+ * multiply referenced.
+ */
+ values_list = copyObject(linitial(rte->values_lists));
+
+ /*
+ * The VALUES RTE can't contain any Vars of level zero, let alone any that
+ * are join aliases, so no need to flatten join alias Vars.
+ */
+ Assert(!contain_vars_of_level((Node *) values_list, 0));
+
+ /*
+ * Set up required context data for pullup_replace_vars. In particular,
+ * we have to make the VALUES list look like a subquery targetlist.
+ */
+ tlist = NIL;
+ attrno = 1;
+ foreach(lc, values_list)
+ {
+ tlist = lappend(tlist,
+ makeTargetEntry((Expr *) lfirst(lc),
+ attrno,
+ NULL,
+ false));
+ attrno++;
+ }
+ rvcontext.root = root;
+ rvcontext.targetlist = tlist;
+ rvcontext.target_rte = rte;
+ rvcontext.relids = NULL;
+ rvcontext.outer_hasSubLinks = &parse->hasSubLinks;
+ rvcontext.varno = varno;
+ rvcontext.need_phvs = false;
+ rvcontext.wrap_non_vars = false;
+ /* initialize cache array with indexes 0 .. length(tlist) */
+ rvcontext.rv_cache = palloc0((list_length(tlist) + 1) *
+ sizeof(Node *));
+
+ /*
+ * Replace all of the top query's references to the RTE's outputs with
+ * copies of the adjusted VALUES expressions, being careful not to replace
+ * any of the jointree structure. We can assume there are no outer joins or
+ * appendrels in the dummy Query that surrounds a VALUES RTE.
+ */
+ perform_pullup_replace_vars(root, &rvcontext, NULL, NULL);
+
+ /*
+ * There should be no appendrels to fix, nor any outer joins and hence no
+ * PlaceHolderVars.
+ */
+ Assert(root->append_rel_list == NIL);
+ Assert(root->join_info_list == NIL);
+ Assert(root->placeholder_list == NIL);
+
+ /*
+ * Replace the VALUES RTE with a RESULT RTE. The VALUES RTE is the only
+ * rtable entry in the current query level, so this is easy.
+ */
+ Assert(list_length(parse->rtable) == 1);
+
+ /* Create suitable RTE */
+ rte = makeNode(RangeTblEntry);
+ rte->rtekind = RTE_RESULT;
+ rte->eref = makeAlias("*RESULT*", NIL);
+
+ /* Replace rangetable */
+ parse->rtable = list_make1(rte);
+
+ /* We could manufacture a new RangeTblRef, but the one we have is fine */
+ Assert(varno == 1);
+
+ return jtnode;
+}
+
+/*
+ * is_simple_values
+ * Check a VALUES RTE in the range table to see if it's simple enough
+ * to pull up into the parent query.
+ *
+ * rte is the RTE_VALUES RangeTblEntry to check.
+ */
+static bool
+is_simple_values(PlannerInfo *root, RangeTblEntry *rte)
+{
+ Assert(rte->rtekind == RTE_VALUES);
+
+ /*
+ * There must be exactly one VALUES list, else it's not semantically
+ * correct to replace the VALUES RTE with a RESULT RTE, nor would we have
+ * a unique set of expressions to substitute into the parent query.
+ */
+ if (list_length(rte->values_lists) != 1)
+ return false;
+
+ /*
+ * Because VALUES can't appear under an outer join (or at least, we won't
+ * try to pull it up if it does), we need not worry about LATERAL, nor
+ * about validity of PHVs for the VALUES' outputs.
+ */
+
+ /*
+ * Don't pull up a VALUES that contains any set-returning or volatile
+ * functions. The considerations here are basically identical to the
+ * restrictions on a pull-able subquery's targetlist.
+ */
+ if (expression_returns_set((Node *) rte->values_lists) ||
+ contain_volatile_functions((Node *) rte->values_lists))
+ return false;
+
+ /*
+ * Do not pull up a VALUES that's not the only RTE in its parent query.
+ * This is actually the only case that the parser will generate at the
+ * moment, and assuming this is true greatly simplifies
+ * pull_up_simple_values().
+ */
+ if (list_length(root->parse->rtable) != 1 ||
+ rte != (RangeTblEntry *) linitial(root->parse->rtable))
+ return false;
+
+ return true;
+}
+
+/*
+ * pull_up_constant_function
+ * Pull up an RTE_FUNCTION expression that was simplified to a constant.
+ *
+ * jtnode is a RangeTblRef that has been identified as a FUNCTION RTE by
+ * pull_up_subqueries. If its expression is just a Const, hoist that value
+ * up into the parent query, and replace the RTE_FUNCTION with RTE_RESULT.
+ *
+ * In principle we could pull up any immutable expression, but we don't.
+ * That might result in multiple evaluations of the expression, which could
+ * be costly if it's not just a Const. Also, the main value of this is
+ * to let the constant participate in further const-folding, and of course
+ * that won't happen for a non-Const.
+ *
+ * The pulled-up value might need to be wrapped in a PlaceHolderVar if the
+ * RTE is below an outer join or is part of an appendrel; the extra
+ * parameters show whether that's needed.
+ */
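+/*
+ * Hypothetical example (assuming const-simplification has already reduced
+ * the function expression to a Const, as described above): in a query like
+ * SELECT * FROM abs(-1) AS a(x)
+ * the funcexpr should by now be the Const 1, so this routine can substitute
+ * that constant for a.x in the parent query and turn the FUNCTION RTE into
+ * a RESULT RTE.
+ */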
+static Node *
+pull_up_constant_function(PlannerInfo *root, Node *jtnode,
+ RangeTblEntry *rte,
+ JoinExpr *lowest_nulling_outer_join,
+ AppendRelInfo *containing_appendrel)
+{
+ Query *parse = root->parse;
+ RangeTblFunction *rtf;
+ TypeFuncClass functypclass;
+ Oid funcrettype;
+ TupleDesc tupdesc;
+ pullup_replace_vars_context rvcontext;
+
+ /* Fail if the RTE has ORDINALITY - we don't implement that here. */
+ if (rte->funcordinality)
+ return jtnode;
+
+ /* Fail if RTE isn't a single, simple Const expr */
+ if (list_length(rte->functions) != 1)
+ return jtnode;
+ rtf = linitial_node(RangeTblFunction, rte->functions);
+ if (!IsA(rtf->funcexpr, Const))
+ return jtnode;
+
+ /*
+ * If the function's result is not a scalar, we punt. In principle we
+ * could break the composite constant value apart into per-column
+ * constants, but for now it seems not worth the work.
+ */
+ if (rtf->funccolcount != 1)
+ return jtnode; /* definitely composite */
+
+ functypclass = get_expr_result_type(rtf->funcexpr,
+ &funcrettype,
+ &tupdesc);
+ if (functypclass != TYPEFUNC_SCALAR)
+ return jtnode; /* must be a one-column composite type */
+
+ /* Create context for applying pullup_replace_vars */
+ rvcontext.root = root;
+ rvcontext.targetlist = list_make1(makeTargetEntry((Expr *) rtf->funcexpr,
+ 1, /* resno */
+ NULL, /* resname */
+ false)); /* resjunk */
+ rvcontext.target_rte = rte;
+
+ /*
+ * Since this function was reduced to a Const, it doesn't contain any
+ * lateral references, even if it's marked as LATERAL. This means we
+ * don't need to fill relids.
+ */
+ rvcontext.relids = NULL;
+
+ rvcontext.outer_hasSubLinks = &parse->hasSubLinks;
+ rvcontext.varno = ((RangeTblRef *) jtnode)->rtindex;
+ /* these flags will be set below, if needed */
+ rvcontext.need_phvs = false;
+ rvcontext.wrap_non_vars = false;
+ /* initialize cache array with indexes 0 .. length(tlist) */
+ rvcontext.rv_cache = palloc0((list_length(rvcontext.targetlist) + 1) *
+ sizeof(Node *));
+
+ /*
+ * If we are under an outer join then non-nullable items and lateral
+ * references may have to be turned into PlaceHolderVars.
+ */
+ if (lowest_nulling_outer_join != NULL)
+ rvcontext.need_phvs = true;
+
+ /*
+ * If we are dealing with an appendrel member then anything that's not a
+ * simple Var has to be turned into a PlaceHolderVar. (See comments in
+ * pull_up_simple_subquery().)
+ */
+ if (containing_appendrel != NULL)
+ {
+ rvcontext.need_phvs = true;
+ rvcontext.wrap_non_vars = true;
+ }
+
+ /*
+ * If the parent query uses grouping sets, we need a PlaceHolderVar for
+ * anything that's not a simple Var.
+ */
+ if (parse->groupingSets)
+ {
+ rvcontext.need_phvs = true;
+ rvcontext.wrap_non_vars = true;
+ }
+
+ /*
+ * Replace all of the top query's references to the RTE's output with
+ * copies of the funcexpr, being careful not to replace any of the
+ * jointree structure.
+ */
+ perform_pullup_replace_vars(root, &rvcontext,
+ lowest_nulling_outer_join,
+ containing_appendrel);
+
+ /*
+ * We don't need to bother with changing PlaceHolderVars in the parent
+ * query. Their references to the RT index are still good for now, and
+ * will get removed later if we're able to drop the RTE_RESULT.
+ */
+
+ /*
+ * Convert the RTE to be RTE_RESULT type, signifying that we don't need to
+ * scan it anymore, and zero out RTE_FUNCTION-specific fields. Also make
+ * sure the RTE is not marked LATERAL, since elsewhere we don't expect
+ * RTE_RESULTs to be LATERAL.
+ */
+ rte->rtekind = RTE_RESULT;
+ rte->functions = NIL;
+ rte->lateral = false;
+
+ /*
+ * We can reuse the RangeTblRef node.
+ */
+ return jtnode;
+}
+
+/*
+ * is_simple_union_all
+ * Check a subquery to see if it's a simple UNION ALL.
+ *
+ * We require all the setops to be UNION ALL (no mixing) and there can't be
+ * any datatype coercions involved, ie, all the leaf queries must emit the
+ * same datatypes.
+ */
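+/*
+ * For illustration (a sketch, not an exhaustive list of cases):
+ * SELECT 1 UNION ALL SELECT 2
+ * qualifies, while
+ * SELECT 1 UNION ALL SELECT 2.0
+ * does not, since the integer leaf would need coercion to the common
+ * numeric output type; any INTERSECT, EXCEPT, or UNION without ALL
+ * anywhere in the tree likewise disqualifies it.
+ */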
+static bool
+is_simple_union_all(Query *subquery)
+{
+ SetOperationStmt *topop;
+
+ /* Let's just make sure it's a valid subselect ... */
+ if (!IsA(subquery, Query) ||
+ subquery->commandType != CMD_SELECT)
+ elog(ERROR, "subquery is bogus");
+
+ /* Is it a set-operation query at all? */
+ topop = castNode(SetOperationStmt, subquery->setOperations);
+ if (!topop)
+ return false;
+
+ /* Can't handle ORDER BY, LIMIT/OFFSET, locking, or WITH */
+ if (subquery->sortClause ||
+ subquery->limitOffset ||
+ subquery->limitCount ||
+ subquery->rowMarks ||
+ subquery->cteList)
+ return false;
+
+ /* Recursively check the tree of set operations */
+ return is_simple_union_all_recurse((Node *) topop, subquery,
+ topop->colTypes);
+}
+
+static bool
+is_simple_union_all_recurse(Node *setOp, Query *setOpQuery, List *colTypes)
+{
+ /* Since this function recurses, it could be driven to stack overflow. */
+ check_stack_depth();
+
+ if (IsA(setOp, RangeTblRef))
+ {
+ RangeTblRef *rtr = (RangeTblRef *) setOp;
+ RangeTblEntry *rte = rt_fetch(rtr->rtindex, setOpQuery->rtable);
+ Query *subquery = rte->subquery;
+
+ Assert(subquery != NULL);
+
+ /* Leaf nodes are OK if they match the toplevel column types */
+ /* We don't have to compare typmods or collations here */
+ return tlist_same_datatypes(subquery->targetList, colTypes, true);
+ }
+ else if (IsA(setOp, SetOperationStmt))
+ {
+ SetOperationStmt *op = (SetOperationStmt *) setOp;
+
+ /* Must be UNION ALL */
+ if (op->op != SETOP_UNION || !op->all)
+ return false;
+
+ /* Recurse to check inputs */
+ return is_simple_union_all_recurse(op->larg, setOpQuery, colTypes) &&
+ is_simple_union_all_recurse(op->rarg, setOpQuery, colTypes);
+ }
+ else
+ {
+ elog(ERROR, "unrecognized node type: %d",
+ (int) nodeTag(setOp));
+ return false; /* keep compiler quiet */
+ }
+}
+
+/*
+ * is_safe_append_member
+ * Check a subquery that is a leaf of a UNION ALL appendrel to see if it's
+ * safe to pull up.
+ */
+static bool
+is_safe_append_member(Query *subquery)
+{
+ FromExpr *jtnode;
+
+ /*
+ * It's only safe to pull up the child if its jointree contains exactly
+ * one RTE, else the AppendRelInfo data structure breaks. The one base RTE
+ * could be buried in several levels of FromExpr, however. Also, if the
+ * child's jointree is completely empty, we can pull up because
+ * pull_up_simple_subquery will insert a single RTE_RESULT RTE instead.
+ *
+ * Also, the child can't have any WHERE quals because there's no place to
+ * put them in an appendrel. (This is a bit annoying...) If we didn't
+ * need to check this, we'd just test whether get_relids_in_jointree()
+ * yields a singleton set, to be more consistent with the coding of
+ * fix_append_rel_relids().
+ */
+ jtnode = subquery->jointree;
+ Assert(IsA(jtnode, FromExpr));
+ /* Check the completely-empty case */
+ if (jtnode->fromlist == NIL && jtnode->quals == NULL)
+ return true;
+ /* Check the more general case */
+ while (IsA(jtnode, FromExpr))
+ {
+ if (jtnode->quals != NULL)
+ return false;
+ if (list_length(jtnode->fromlist) != 1)
+ return false;
+ jtnode = linitial(jtnode->fromlist);
+ }
+ if (!IsA(jtnode, RangeTblRef))
+ return false;
+
+ return true;
+}
+
+/*
+ * jointree_contains_lateral_outer_refs
+ * Check for disallowed lateral references in a jointree's quals
+ *
+ * If restricted is false, all level-1 Vars are allowed (but we still must
+ * search the jointree, since it might contain outer joins below which there
+ * will be restrictions). If restricted is true, return true when any qual
+ * in the jointree contains level-1 Vars coming from outside the rels listed
+ * in safe_upper_varnos.
+ */
+static bool
+jointree_contains_lateral_outer_refs(PlannerInfo *root, Node *jtnode,
+ bool restricted,
+ Relids safe_upper_varnos)
+{
+ if (jtnode == NULL)
+ return false;
+ if (IsA(jtnode, RangeTblRef))
+ return false;
+ else if (IsA(jtnode, FromExpr))
+ {
+ FromExpr *f = (FromExpr *) jtnode;
+ ListCell *l;
+
+ /* First, recurse to check child joins */
+ foreach(l, f->fromlist)
+ {
+ if (jointree_contains_lateral_outer_refs(root,
+ lfirst(l),
+ restricted,
+ safe_upper_varnos))
+ return true;
+ }
+
+ /* Then check the top-level quals */
+ if (restricted &&
+ !bms_is_subset(pull_varnos_of_level(root, f->quals, 1),
+ safe_upper_varnos))
+ return true;
+ }
+ else if (IsA(jtnode, JoinExpr))
+ {
+ JoinExpr *j = (JoinExpr *) jtnode;
+
+ /*
+ * If this is an outer join, we mustn't allow any upper lateral
+ * references in or below it.
+ */
+ if (j->jointype != JOIN_INNER)
+ {
+ restricted = true;
+ safe_upper_varnos = NULL;
+ }
+
+ /* Check the child joins */
+ if (jointree_contains_lateral_outer_refs(root,
+ j->larg,
+ restricted,
+ safe_upper_varnos))
+ return true;
+ if (jointree_contains_lateral_outer_refs(root,
+ j->rarg,
+ restricted,
+ safe_upper_varnos))
+ return true;
+
+ /* Check the JOIN's qual clauses */
+ if (restricted &&
+ !bms_is_subset(pull_varnos_of_level(root, j->quals, 1),
+ safe_upper_varnos))
+ return true;
+ }
+ else
+ elog(ERROR, "unrecognized node type: %d",
+ (int) nodeTag(jtnode));
+ return false;
+}
+
+/*
+ * Perform pullup_replace_vars everyplace it's needed in the query tree.
+ *
+ * Caller has already filled *rvcontext with data describing what to
+ * substitute for Vars referencing the target subquery. In addition
+ * we need the identity of the lowest outer join that can null the
+ * target subquery, and its containing appendrel if any.
+ */
+static void
+perform_pullup_replace_vars(PlannerInfo *root,
+ pullup_replace_vars_context *rvcontext,
+ JoinExpr *lowest_nulling_outer_join,
+ AppendRelInfo *containing_appendrel)
+{
+ Query *parse = root->parse;
+ ListCell *lc;
+
+ /*
+ * Replace all of the top query's references to the subquery's outputs
+ * with copies of the adjusted subtlist items, being careful not to
+ * replace any of the jointree structure. (This'd be a lot cleaner if we
+ * could use query_tree_mutator.) We have to use PHVs in the targetList,
+ * returningList, and havingQual, since those are certainly above any
+ * outer join. replace_vars_in_jointree tracks its location in the
+ * jointree and uses PHVs or not appropriately.
+ */
+ parse->targetList = (List *)
+ pullup_replace_vars((Node *) parse->targetList, rvcontext);
+ parse->returningList = (List *)
+ pullup_replace_vars((Node *) parse->returningList, rvcontext);
+
+ foreach(lc, parse->windowClause)
+ {
+ WindowClause *wc = lfirst_node(WindowClause, lc);
+
+ if (wc->runCondition != NIL)
+ wc->runCondition = (List *)
+ pullup_replace_vars((Node *) wc->runCondition, rvcontext);
+ }
+ if (parse->onConflict)
+ {
+ parse->onConflict->onConflictSet = (List *)
+ pullup_replace_vars((Node *) parse->onConflict->onConflictSet,
+ rvcontext);
+ parse->onConflict->onConflictWhere =
+ pullup_replace_vars(parse->onConflict->onConflictWhere,
+ rvcontext);
+
+ /*
+ * We assume ON CONFLICT's arbiterElems, arbiterWhere, exclRelTlist
+ * can't contain any references to a subquery.
+ */
+ }
+ if (parse->mergeActionList)
+ {
+ foreach(lc, parse->mergeActionList)
+ {
+ MergeAction *action = lfirst(lc);
+
+ action->qual = pullup_replace_vars(action->qual, rvcontext);
+ action->targetList = (List *)
+ pullup_replace_vars((Node *) action->targetList, rvcontext);
+ }
+ }
+ replace_vars_in_jointree((Node *) parse->jointree, rvcontext,
+ lowest_nulling_outer_join);
+ Assert(parse->setOperations == NULL);
+ parse->havingQual = pullup_replace_vars(parse->havingQual, rvcontext);
+
+ /*
+ * Replace references in the translated_vars lists of appendrels. When
+ * pulling up an appendrel member, we do not need PHVs in the list of the
+ * parent appendrel --- there is no outer join in between. Elsewhere,
+ * use PHVs for safety. (This analysis could be made tighter but it seems
+ * unlikely to be worth much trouble.)
+ */
+ foreach(lc, root->append_rel_list)
+ {
+ AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(lc);
+ bool save_need_phvs = rvcontext->need_phvs;
+
+ if (appinfo == containing_appendrel)
+ rvcontext->need_phvs = false;
+ appinfo->translated_vars = (List *)
+ pullup_replace_vars((Node *) appinfo->translated_vars, rvcontext);
+ rvcontext->need_phvs = save_need_phvs;
+ }
+
+ /*
+ * Replace references in the joinaliasvars lists of join RTEs.
+ *
+ * You might think that we could avoid using PHVs for alias vars of joins
+ * below lowest_nulling_outer_join, but that doesn't work because the
+ * alias vars could be referenced above that join; we need the PHVs to be
+ * present in such references after the alias vars get flattened. (It
+ * might be worth trying to be smarter here, someday.)
+ */
+ foreach(lc, parse->rtable)
+ {
+ RangeTblEntry *otherrte = (RangeTblEntry *) lfirst(lc);
+
+ if (otherrte->rtekind == RTE_JOIN)
+ otherrte->joinaliasvars = (List *)
+ pullup_replace_vars((Node *) otherrte->joinaliasvars,
+ rvcontext);
+ }
+}
+
+/*
+ * Helper routine for perform_pullup_replace_vars: do pullup_replace_vars on
+ * every expression in the jointree, without changing the jointree structure
+ * itself. Ugly, but there's no other way...
+ *
+ * If we are at or below lowest_nulling_outer_join, we can suppress use of
+ * PlaceHolderVars wrapped around the replacement expressions.
+ */
+static void
+replace_vars_in_jointree(Node *jtnode,
+ pullup_replace_vars_context *context,
+ JoinExpr *lowest_nulling_outer_join)
+{
+ if (jtnode == NULL)
+ return;
+ if (IsA(jtnode, RangeTblRef))
+ {
+ /*
+ * If the RangeTblRef refers to a LATERAL subquery (that isn't the
+ * same subquery we're pulling up), it might contain references to the
+ * target subquery, which we must replace. We drive this from the
+ * jointree scan, rather than a scan of the rtable, for a couple of
+ * reasons: we can avoid processing no-longer-referenced RTEs, and we
+ * can use the appropriate setting of need_phvs depending on whether
+ * the RTE is above possibly-nulling outer joins or not.
+ */
+ int varno = ((RangeTblRef *) jtnode)->rtindex;
+
+ if (varno != context->varno) /* ignore target subquery itself */
+ {
+ RangeTblEntry *rte = rt_fetch(varno, context->root->parse->rtable);
+
+ Assert(rte != context->target_rte);
+ if (rte->lateral)
+ {
+ switch (rte->rtekind)
+ {
+ case RTE_RELATION:
+ /* shouldn't be marked LATERAL unless tablesample */
+ Assert(rte->tablesample);
+ rte->tablesample = (TableSampleClause *)
+ pullup_replace_vars((Node *) rte->tablesample,
+ context);
+ break;
+ case RTE_SUBQUERY:
+ rte->subquery =
+ pullup_replace_vars_subquery(rte->subquery,
+ context);
+ break;
+ case RTE_FUNCTION:
+ rte->functions = (List *)
+ pullup_replace_vars((Node *) rte->functions,
+ context);
+ break;
+ case RTE_TABLEFUNC:
+ rte->tablefunc = (TableFunc *)
+ pullup_replace_vars((Node *) rte->tablefunc,
+ context);
+ break;
+ case RTE_VALUES:
+ rte->values_lists = (List *)
+ pullup_replace_vars((Node *) rte->values_lists,
+ context);
+ break;
+ case RTE_JOIN:
+ case RTE_CTE:
+ case RTE_NAMEDTUPLESTORE:
+ case RTE_RESULT:
+ /* these shouldn't be marked LATERAL */
+ Assert(false);
+ break;
+ }
+ }
+ }
+ }
+ else if (IsA(jtnode, FromExpr))
+ {
+ FromExpr *f = (FromExpr *) jtnode;
+ ListCell *l;
+
+ foreach(l, f->fromlist)
+ replace_vars_in_jointree(lfirst(l), context,
+ lowest_nulling_outer_join);
+ f->quals = pullup_replace_vars(f->quals, context);
+ }
+ else if (IsA(jtnode, JoinExpr))
+ {
+ JoinExpr *j = (JoinExpr *) jtnode;
+ bool save_need_phvs = context->need_phvs;
+
+ if (j == lowest_nulling_outer_join)
+ {
+ /* no more PHVs in or below this join */
+ context->need_phvs = false;
+ lowest_nulling_outer_join = NULL;
+ }
+ replace_vars_in_jointree(j->larg, context, lowest_nulling_outer_join);
+ replace_vars_in_jointree(j->rarg, context, lowest_nulling_outer_join);
+
+ /*
+ * Use PHVs within the join quals of a full join, even when it's the
+ * lowest nulling outer join. Otherwise, we cannot identify which
+ * side of the join a pulled-up var-free expression came from, which
+ * can lead to failure to make a plan at all because none of the quals
+ * appear to be mergeable or hashable conditions. For this purpose we
+ * don't care about the state of wrap_non_vars, so leave it alone.
+ */
+ if (j->jointype == JOIN_FULL)
+ context->need_phvs = true;
+
+ j->quals = pullup_replace_vars(j->quals, context);
+
+ /*
+ * We don't bother to update the colvars list, since it won't be used
+ * again ...
+ */
+ context->need_phvs = save_need_phvs;
+ }
+ else
+ elog(ERROR, "unrecognized node type: %d",
+ (int) nodeTag(jtnode));
+}
+
+/*
+ * Apply pullup variable replacement throughout an expression tree
+ *
+ * Returns a modified copy of the tree, so this can't be used where we
+ * need to do in-place replacement.
+ */
+static Node *
+pullup_replace_vars(Node *expr, pullup_replace_vars_context *context)
+{
+ return replace_rte_variables(expr,
+ context->varno, 0,
+ pullup_replace_vars_callback,
+ (void *) context,
+ context->outer_hasSubLinks);
+}
+
+static Node *
+pullup_replace_vars_callback(Var *var,
+ replace_rte_variables_context *context)
+{
+ pullup_replace_vars_context *rcon = (pullup_replace_vars_context *) context->callback_arg;
+ int varattno = var->varattno;
+ Node *newnode;
+
+ /*
+ * If PlaceHolderVars are needed, we cache the modified expressions in
+ * rcon->rv_cache[]. This is not in hopes of any material speed gain
+ * within this function, but to avoid generating identical PHVs with
+ * different IDs. That would result in duplicate evaluations at runtime,
+ * and possibly prevent optimizations that rely on recognizing different
+ * references to the same subquery output as being equal(). So it's worth
+ * a bit of extra effort to avoid it.
+ */
+ if (rcon->need_phvs &&
+ varattno >= InvalidAttrNumber &&
+ varattno <= list_length(rcon->targetlist) &&
+ rcon->rv_cache[varattno] != NULL)
+ {
+ /* Just copy the entry and fall through to adjust its varlevelsup */
+ newnode = copyObject(rcon->rv_cache[varattno]);
+ }
+ else if (varattno == InvalidAttrNumber)
+ {
+ /* Must expand whole-tuple reference into RowExpr */
+ RowExpr *rowexpr;
+ List *colnames;
+ List *fields;
+ bool save_need_phvs = rcon->need_phvs;
+ int save_sublevelsup = context->sublevels_up;
+
+ /*
+ * If generating an expansion for a var of a named rowtype (ie, this
+ * is a plain relation RTE), then we must include dummy items for
+ * dropped columns. If the var is RECORD (ie, this is a JOIN), then
+ * omit dropped columns. In the latter case, attach column names to
+ * the RowExpr for use of the executor and ruleutils.c.
+ *
+ * In order to be able to cache the results, we always generate the
+ * expansion with varlevelsup = 0, and then adjust if needed.
+ */
+ expandRTE(rcon->target_rte,
+ var->varno, 0 /* not varlevelsup */ , var->location,
+ (var->vartype != RECORDOID),
+ &colnames, &fields);
+ /* Adjust the generated per-field Vars, but don't insert PHVs */
+ rcon->need_phvs = false;
+ context->sublevels_up = 0; /* to match the expandRTE output */
+ fields = (List *) replace_rte_variables_mutator((Node *) fields,
+ context);
+ rcon->need_phvs = save_need_phvs;
+ context->sublevels_up = save_sublevelsup;
+
+ rowexpr = makeNode(RowExpr);
+ rowexpr->args = fields;
+ rowexpr->row_typeid = var->vartype;
+ rowexpr->row_format = COERCE_IMPLICIT_CAST;
+ rowexpr->colnames = (var->vartype == RECORDOID) ? colnames : NIL;
+ rowexpr->location = var->location;
+ newnode = (Node *) rowexpr;
+
+ /*
+ * Insert PlaceHolderVar if needed. Notice that we are wrapping one
+ * PlaceHolderVar around the whole RowExpr, rather than putting one
+ * around each element of the row. This is because we need the
+ * expression to yield NULL, not ROW(NULL,NULL,...) when it is forced
+ * to null by an outer join.
+ */
+ if (rcon->need_phvs)
+ {
+ /* RowExpr is certainly not strict, so always need PHV */
+ newnode = (Node *)
+ make_placeholder_expr(rcon->root,
+ (Expr *) newnode,
+ bms_make_singleton(rcon->varno));
+ /* cache it with the PHV, and with varlevelsup still zero */
+ rcon->rv_cache[InvalidAttrNumber] = copyObject(newnode);
+ }
+ }
+ else
+ {
+ /* Normal case referencing one targetlist element */
+ TargetEntry *tle = get_tle_by_resno(rcon->targetlist, varattno);
+
+ if (tle == NULL) /* shouldn't happen */
+ elog(ERROR, "could not find attribute %d in subquery targetlist",
+ varattno);
+
+ /* Make a copy of the tlist item to return */
+ newnode = (Node *) copyObject(tle->expr);
+
+ /* Insert PlaceHolderVar if needed */
+ if (rcon->need_phvs)
+ {
+ bool wrap;
+
+ if (newnode && IsA(newnode, Var) &&
+ ((Var *) newnode)->varlevelsup == 0)
+ {
+ /*
+ * Simple Vars always escape being wrapped, unless they are
+ * lateral references to something outside the subquery being
+ * pulled up. (Even then, we could omit the PlaceHolderVar if
+ * the referenced rel is under the same lowest outer join, but
+ * it doesn't seem worth the trouble to check that.)
+ */
+ if (rcon->target_rte->lateral &&
+ !bms_is_member(((Var *) newnode)->varno, rcon->relids))
+ wrap = true;
+ else
+ wrap = false;
+ }
+ else if (newnode && IsA(newnode, PlaceHolderVar) &&
+ ((PlaceHolderVar *) newnode)->phlevelsup == 0)
+ {
+ /* No need to wrap a PlaceHolderVar with another one, either */
+ wrap = false;
+ }
+ else if (rcon->wrap_non_vars)
+ {
+ /* Wrap all non-Vars in a PlaceHolderVar */
+ wrap = true;
+ }
+ else
+ {
+ /*
+ * If it contains a Var of the subquery being pulled up, and
+ * does not contain any non-strict constructs, then it's
+ * certainly nullable so we don't need to insert a
+ * PlaceHolderVar.
+ *
+ * This analysis could be tighter: in particular, a non-strict
+ * construct hidden within a lower-level PlaceHolderVar is not
+ * reason to add another PHV. But for now it doesn't seem
+ * worth the code to be more exact.
+ *
+ * Note: in future maybe we should insert a PlaceHolderVar
+ * anyway, if the tlist item is expensive to evaluate?
+ *
+ * For a LATERAL subquery, we have to check the actual var
+ * membership of the node, but if it's non-lateral then any
+ * level-zero var must belong to the subquery.
+ */
+ if ((rcon->target_rte->lateral ?
+ bms_overlap(pull_varnos(rcon->root, (Node *) newnode),
+ rcon->relids) :
+ contain_vars_of_level((Node *) newnode, 0)) &&
+ !contain_nonstrict_functions((Node *) newnode))
+ {
+ /* No wrap needed */
+ wrap = false;
+ }
+ else
+ {
+ /* Else wrap it in a PlaceHolderVar */
+ wrap = true;
+ }
+ }
+
+ if (wrap)
+ newnode = (Node *)
+ make_placeholder_expr(rcon->root,
+ (Expr *) newnode,
+ bms_make_singleton(rcon->varno));
+
+ /*
+ * Cache it if possible (ie, if the attno is in range, which it
+ * probably always should be). We can cache the value even if we
+ * decided we didn't need a PHV, since this result will be
+ * suitable for any request that has need_phvs.
+ */
+ if (varattno > InvalidAttrNumber &&
+ varattno <= list_length(rcon->targetlist))
+ rcon->rv_cache[varattno] = copyObject(newnode);
+ }
+ }
+
+ /* Must adjust varlevelsup if tlist item is from higher query */
+ if (var->varlevelsup > 0)
+ IncrementVarSublevelsUp(newnode, var->varlevelsup, 0);
+
+ return newnode;
+}
+
+/*
+ * Apply pullup variable replacement to a subquery
+ *
+ * This needs to be different from pullup_replace_vars() because
+ * replace_rte_variables will think that it shouldn't increment sublevels_up
+ * before entering the Query; so we need to call it with sublevels_up == 1.
+ */
+static Query *
+pullup_replace_vars_subquery(Query *query,
+ pullup_replace_vars_context *context)
+{
+ Assert(IsA(query, Query));
+ return (Query *) replace_rte_variables((Node *) query,
+ context->varno, 1,
+ pullup_replace_vars_callback,
+ (void *) context,
+ NULL);
+}
+
+
+/*
+ * flatten_simple_union_all
+ * Try to optimize top-level UNION ALL structure into an appendrel
+ *
+ * If a query's setOperations tree consists entirely of simple UNION ALL
+ * operations, flatten it into an append relation, which we can process more
+ * intelligently than the general setops case. Otherwise, do nothing.
+ *
+ * In most cases, this can succeed only for a top-level query, because for a
+ * subquery in FROM, the parent query's invocation of pull_up_subqueries would
+ * already have flattened the UNION via pull_up_simple_union_all. But there
+ * are a few cases we can support here but not in that code path, for example
+ * when the subquery also contains ORDER BY.
+ */
+void
+flatten_simple_union_all(PlannerInfo *root)
+{
+ Query *parse = root->parse;
+ SetOperationStmt *topop;
+ Node *leftmostjtnode;
+ int leftmostRTI;
+ RangeTblEntry *leftmostRTE;
+ int childRTI;
+ RangeTblEntry *childRTE;
+ RangeTblRef *rtr;
+
+ /* Shouldn't be called unless query has setops */
+ topop = castNode(SetOperationStmt, parse->setOperations);
+ Assert(topop);
+
+ /* Can't optimize away a recursive UNION */
+ if (root->hasRecursion)
+ return;
+
+ /*
+ * Recursively check the tree of set operations. If not all UNION ALL
+ * with identical column types, punt.
+ */
+ if (!is_simple_union_all_recurse((Node *) topop, parse, topop->colTypes))
+ return;
+
+ /*
+ * Locate the leftmost leaf query in the setops tree. The upper query's
+ * Vars all refer to this RTE (see transformSetOperationStmt).
+ */
+ leftmostjtnode = topop->larg;
+ while (leftmostjtnode && IsA(leftmostjtnode, SetOperationStmt))
+ leftmostjtnode = ((SetOperationStmt *) leftmostjtnode)->larg;
+ Assert(leftmostjtnode && IsA(leftmostjtnode, RangeTblRef));
+ leftmostRTI = ((RangeTblRef *) leftmostjtnode)->rtindex;
+ leftmostRTE = rt_fetch(leftmostRTI, parse->rtable);
+ Assert(leftmostRTE->rtekind == RTE_SUBQUERY);
+
+ /*
+ * Make a copy of the leftmost RTE and add it to the rtable. This copy
+ * will represent the leftmost leaf query in its capacity as a member of
+ * the appendrel. The original will represent the appendrel as a whole.
+ * (We must do things this way because the upper query's Vars have to be
+ * seen as referring to the whole appendrel.)
+ */
+ childRTE = copyObject(leftmostRTE);
+ parse->rtable = lappend(parse->rtable, childRTE);
+ childRTI = list_length(parse->rtable);
+
+ /* Modify the setops tree to reference the child copy */
+ ((RangeTblRef *) leftmostjtnode)->rtindex = childRTI;
+
+ /* Modify the formerly-leftmost RTE to mark it as an appendrel parent */
+ leftmostRTE->inh = true;
+
+ /*
+ * Form a RangeTblRef for the appendrel, and insert it into FROM. The top
+ * Query of a setops tree should have had an empty FromClause initially.
+ */
+ rtr = makeNode(RangeTblRef);
+ rtr->rtindex = leftmostRTI;
+ Assert(parse->jointree->fromlist == NIL);
+ parse->jointree->fromlist = list_make1(rtr);
+
+ /*
+ * Now pretend the query has no setops. We must do this before trying to
+ * do subquery pullup, because of Assert in pull_up_simple_subquery.
+ */
+ parse->setOperations = NULL;
+
+ /*
+ * Build AppendRelInfo information, and apply pull_up_subqueries to the
+ * leaf queries of the UNION ALL. (We must do that now because they
+ * weren't previously referenced by the jointree, and so were missed by
+ * the main invocation of pull_up_subqueries.)
+ */
+ pull_up_union_leaf_queries((Node *) topop, root, leftmostRTI, parse, 0);
+}
+
+
+/*
+ * reduce_outer_joins
+ * Attempt to reduce outer joins to plain inner joins.
+ *
+ * The idea here is that given a query like
+ * SELECT ... FROM a LEFT JOIN b ON (...) WHERE b.y = 42;
+ * we can reduce the LEFT JOIN to a plain JOIN if the "=" operator in WHERE
+ * is strict. The strict operator will always return NULL, causing the outer
+ * WHERE to fail, on any row where the LEFT JOIN filled in NULLs for b's
+ * columns. Therefore, there's no need for the join to produce null-extended
+ * rows in the first place --- which makes it a plain join not an outer join.
+ * (This scenario may not be very likely in a query written out by hand, but
+ * it's reasonably likely when pushing quals down into complex views.)
+ *
+ * More generally, an outer join can be reduced in strength if there is a
+ * strict qual above it in the qual tree that constrains a Var from the
+ * nullable side of the join to be non-null. (For FULL joins this applies
+ * to each side separately.)
+ *
+ * Another transformation we apply here is to recognize cases like
+ * SELECT ... FROM a LEFT JOIN b ON (a.x = b.y) WHERE b.y IS NULL;
+ * If the join clause is strict for b.y, then only null-extended rows could
+ * pass the upper WHERE, and we can conclude that what the query is really
+ * specifying is an anti-semijoin. We change the join type from JOIN_LEFT
+ * to JOIN_ANTI. The IS NULL clause then becomes redundant, and must be
+ * removed to prevent bogus selectivity calculations, but we leave it to
+ * distribute_qual_to_rels to get rid of such clauses.
+ *
+ * Also, we get rid of JOIN_RIGHT cases by flipping them around to become
+ * JOIN_LEFT. This saves some code here and in some later planner routines,
+ * but the main reason to do it is to not need to invent a JOIN_REVERSE_ANTI
+ * join type.
+ *
+ * To ease recognition of strict qual clauses, we require this routine to be
+ * run after expression preprocessing (i.e., qual canonicalization and JOIN
+ * alias-var expansion).
+ */
+void
+reduce_outer_joins(PlannerInfo *root)
+{
+ reduce_outer_joins_state *state;
+
+ /*
+ * To avoid doing strictness checks on more quals than necessary, we want
+ * to stop descending the jointree as soon as there are no outer joins
+ * below our current point. This consideration forces a two-pass process.
+ * The first pass gathers information about which base rels appear below
+ * each side of each join clause, and about whether there are outer
+ * join(s) below each side of each join clause. The second pass examines
+ * qual clauses and changes join types as it descends the tree.
+ */
+ state = reduce_outer_joins_pass1((Node *) root->parse->jointree);
+
+ /* planner.c shouldn't have called me if no outer joins */
+ if (state == NULL || !state->contains_outer)
+ elog(ERROR, "so where are the outer joins?");
+
+ reduce_outer_joins_pass2((Node *) root->parse->jointree,
+ state, root, NULL, NIL, NIL);
+}
+
+/*
+ * reduce_outer_joins_pass1 - phase 1 data collection
+ *
+ * Returns a state node describing the given jointree node.
+ */
+static reduce_outer_joins_state *
+reduce_outer_joins_pass1(Node *jtnode)
+{
+ reduce_outer_joins_state *result;
+
+ result = (reduce_outer_joins_state *)
+ palloc(sizeof(reduce_outer_joins_state));
+ result->relids = NULL;
+ result->contains_outer = false;
+ result->sub_states = NIL;
+
+ if (jtnode == NULL)
+ return result;
+ if (IsA(jtnode, RangeTblRef))
+ {
+ int varno = ((RangeTblRef *) jtnode)->rtindex;
+
+ result->relids = bms_make_singleton(varno);
+ }
+ else if (IsA(jtnode, FromExpr))
+ {
+ FromExpr *f = (FromExpr *) jtnode;
+ ListCell *l;
+
+ foreach(l, f->fromlist)
+ {
+ reduce_outer_joins_state *sub_state;
+
+ sub_state = reduce_outer_joins_pass1(lfirst(l));
+ result->relids = bms_add_members(result->relids,
+ sub_state->relids);
+ result->contains_outer |= sub_state->contains_outer;
+ result->sub_states = lappend(result->sub_states, sub_state);
+ }
+ }
+ else if (IsA(jtnode, JoinExpr))
+ {
+ JoinExpr *j = (JoinExpr *) jtnode;
+ reduce_outer_joins_state *sub_state;
+
+ /* join's own RT index is not wanted in result->relids */
+ if (IS_OUTER_JOIN(j->jointype))
+ result->contains_outer = true;
+
+ sub_state = reduce_outer_joins_pass1(j->larg);
+ result->relids = bms_add_members(result->relids,
+ sub_state->relids);
+ result->contains_outer |= sub_state->contains_outer;
+ result->sub_states = lappend(result->sub_states, sub_state);
+
+ sub_state = reduce_outer_joins_pass1(j->rarg);
+ result->relids = bms_add_members(result->relids,
+ sub_state->relids);
+ result->contains_outer |= sub_state->contains_outer;
+ result->sub_states = lappend(result->sub_states, sub_state);
+ }
+ else
+ elog(ERROR, "unrecognized node type: %d",
+ (int) nodeTag(jtnode));
+ return result;
+}
+
+/*
+ * reduce_outer_joins_pass2 - phase 2 processing
+ *
+ * jtnode: current jointree node
+ * state: state data collected by phase 1 for this node
+ * root: toplevel planner state
+ * nonnullable_rels: set of base relids forced non-null by upper quals
+ * nonnullable_vars: list of Vars forced non-null by upper quals
+ * forced_null_vars: list of Vars forced null by upper quals
+ */
+static void
+reduce_outer_joins_pass2(Node *jtnode,
+ reduce_outer_joins_state *state,
+ PlannerInfo *root,
+ Relids nonnullable_rels,
+ List *nonnullable_vars,
+ List *forced_null_vars)
+{
+ /*
+ * pass 2 should never descend as far as an empty subnode or base rel,
+ * because it's only called on subtrees marked as contains_outer.
+ */
+ if (jtnode == NULL)
+ elog(ERROR, "reached empty jointree");
+ if (IsA(jtnode, RangeTblRef))
+ elog(ERROR, "reached base rel");
+ else if (IsA(jtnode, FromExpr))
+ {
+ FromExpr *f = (FromExpr *) jtnode;
+ ListCell *l;
+ ListCell *s;
+ Relids pass_nonnullable_rels;
+ List *pass_nonnullable_vars;
+ List *pass_forced_null_vars;
+
+ /* Scan quals to see if we can add any constraints */
+ pass_nonnullable_rels = find_nonnullable_rels(f->quals);
+ pass_nonnullable_rels = bms_add_members(pass_nonnullable_rels,
+ nonnullable_rels);
+ pass_nonnullable_vars = find_nonnullable_vars(f->quals);
+ pass_nonnullable_vars = list_concat(pass_nonnullable_vars,
+ nonnullable_vars);
+ pass_forced_null_vars = find_forced_null_vars(f->quals);
+ pass_forced_null_vars = list_concat(pass_forced_null_vars,
+ forced_null_vars);
+ /* And recurse --- but only into interesting subtrees */
+ Assert(list_length(f->fromlist) == list_length(state->sub_states));
+ forboth(l, f->fromlist, s, state->sub_states)
+ {
+ reduce_outer_joins_state *sub_state = lfirst(s);
+
+ if (sub_state->contains_outer)
+ reduce_outer_joins_pass2(lfirst(l), sub_state, root,
+ pass_nonnullable_rels,
+ pass_nonnullable_vars,
+ pass_forced_null_vars);
+ }
+ bms_free(pass_nonnullable_rels);
+ /* can't so easily clean up var lists, unfortunately */
+ }
+ else if (IsA(jtnode, JoinExpr))
+ {
+ JoinExpr *j = (JoinExpr *) jtnode;
+ int rtindex = j->rtindex;
+ JoinType jointype = j->jointype;
+ reduce_outer_joins_state *left_state = linitial(state->sub_states);
+ reduce_outer_joins_state *right_state = lsecond(state->sub_states);
+ List *local_nonnullable_vars = NIL;
+ bool computed_local_nonnullable_vars = false;
+
+ /* Can we simplify this join? */
+ switch (jointype)
+ {
+ case JOIN_INNER:
+ break;
+ case JOIN_LEFT:
+ if (bms_overlap(nonnullable_rels, right_state->relids))
+ jointype = JOIN_INNER;
+ break;
+ case JOIN_RIGHT:
+ if (bms_overlap(nonnullable_rels, left_state->relids))
+ jointype = JOIN_INNER;
+ break;
+ case JOIN_FULL:
+ if (bms_overlap(nonnullable_rels, left_state->relids))
+ {
+ if (bms_overlap(nonnullable_rels, right_state->relids))
+ jointype = JOIN_INNER;
+ else
+ jointype = JOIN_LEFT;
+ }
+ else
+ {
+ if (bms_overlap(nonnullable_rels, right_state->relids))
+ jointype = JOIN_RIGHT;
+ }
+ break;
+ case JOIN_SEMI:
+ case JOIN_ANTI:
+
+ /*
+ * These could only have been introduced by pull_up_sublinks,
+ * so there's no way that upper quals could refer to their
+ * righthand sides, and no point in checking.
+ */
+ break;
+ default:
+ elog(ERROR, "unrecognized join type: %d",
+ (int) jointype);
+ break;
+ }
+
+ /*
+ * Convert JOIN_RIGHT to JOIN_LEFT. Note that in the case where we
+ * reduced JOIN_FULL to JOIN_RIGHT, this will mean the JoinExpr no
+ * longer matches the internal ordering of any CoalesceExpr's built to
+ * represent merged join variables. We don't care about that at
+ * present, but be wary of it ...
+ */
+ if (jointype == JOIN_RIGHT)
+ {
+ Node *tmparg;
+
+ tmparg = j->larg;
+ j->larg = j->rarg;
+ j->rarg = tmparg;
+ jointype = JOIN_LEFT;
+ right_state = linitial(state->sub_states);
+ left_state = lsecond(state->sub_states);
+ }
+
+ /*
+ * See if we can reduce JOIN_LEFT to JOIN_ANTI. This is the case if
+ * the join's own quals are strict for any var that was forced null by
+ * higher qual levels. NOTE: there are other ways that we could
+ * detect an anti-join, in particular if we were to check whether Vars
+ * coming from the RHS must be non-null because of table constraints.
+ * That seems complicated and expensive though (in particular, one
+ * would have to be wary of lower outer joins). For the moment this
+ * seems sufficient.
+ */
+ if (jointype == JOIN_LEFT)
+ {
+ List *overlap;
+
+ local_nonnullable_vars = find_nonnullable_vars(j->quals);
+ computed_local_nonnullable_vars = true;
+
+ /*
+ * It's not sufficient to check whether local_nonnullable_vars and
+ * forced_null_vars overlap: we need to know if the overlap
+ * includes any RHS variables.
+ */
+ overlap = list_intersection(local_nonnullable_vars,
+ forced_null_vars);
+ if (overlap != NIL &&
+ bms_overlap(pull_varnos(root, (Node *) overlap),
+ right_state->relids))
+ jointype = JOIN_ANTI;
+ }
+
+ /* Apply the jointype change, if any, to both jointree node and RTE */
+ if (rtindex && jointype != j->jointype)
+ {
+ RangeTblEntry *rte = rt_fetch(rtindex, root->parse->rtable);
+
+ Assert(rte->rtekind == RTE_JOIN);
+ Assert(rte->jointype == j->jointype);
+ rte->jointype = jointype;
+ }
+ j->jointype = jointype;
+
+ /* Only recurse if there's more to do below here */
+ if (left_state->contains_outer || right_state->contains_outer)
+ {
+ Relids local_nonnullable_rels;
+ List *local_forced_null_vars;
+ Relids pass_nonnullable_rels;
+ List *pass_nonnullable_vars;
+ List *pass_forced_null_vars;
+
+ /*
+ * If this join is (now) inner, we can add any constraints its
+ * quals provide to those we got from above. But if it is outer,
+ * we can pass down the local constraints only into the nullable
+ * side, because an outer join never eliminates any rows from its
+ * non-nullable side. Also, there is no point in passing upper
+ * constraints into the nullable side, since if there were any
+ * we'd have been able to reduce the join. (In the case of upper
+ * forced-null constraints, we *must not* pass them into the
+ * nullable side --- they either applied here, or not.) The upshot
+ * is that we pass either the local or the upper constraints,
+ * never both, to the children of an outer join.
+ *
+ * Note that a SEMI join works like an inner join here: it's okay
+ * to pass down both local and upper constraints. (There can't be
+ * any upper constraints affecting its inner side, but it's not
+ * worth having a separate code path to avoid passing them.)
+ *
+ * At a FULL join we just punt and pass nothing down --- is it
+ * possible to be smarter?
+ */
+ if (jointype != JOIN_FULL)
+ {
+ local_nonnullable_rels = find_nonnullable_rels(j->quals);
+ if (!computed_local_nonnullable_vars)
+ local_nonnullable_vars = find_nonnullable_vars(j->quals);
+ local_forced_null_vars = find_forced_null_vars(j->quals);
+ if (jointype == JOIN_INNER || jointype == JOIN_SEMI)
+ {
+ /* OK to merge upper and local constraints */
+ local_nonnullable_rels = bms_add_members(local_nonnullable_rels,
+ nonnullable_rels);
+ local_nonnullable_vars = list_concat(local_nonnullable_vars,
+ nonnullable_vars);
+ local_forced_null_vars = list_concat(local_forced_null_vars,
+ forced_null_vars);
+ }
+ }
+ else
+ {
+ /* no use in calculating these */
+ local_nonnullable_rels = NULL;
+ local_forced_null_vars = NIL;
+ }
+
+ if (left_state->contains_outer)
+ {
+ if (jointype == JOIN_INNER || jointype == JOIN_SEMI)
+ {
+ /* pass union of local and upper constraints */
+ pass_nonnullable_rels = local_nonnullable_rels;
+ pass_nonnullable_vars = local_nonnullable_vars;
+ pass_forced_null_vars = local_forced_null_vars;
+ }
+ else if (jointype != JOIN_FULL) /* ie, LEFT or ANTI */
+ {
+ /* can't pass local constraints to non-nullable side */
+ pass_nonnullable_rels = nonnullable_rels;
+ pass_nonnullable_vars = nonnullable_vars;
+ pass_forced_null_vars = forced_null_vars;
+ }
+ else
+ {
+ /* no constraints pass through JOIN_FULL */
+ pass_nonnullable_rels = NULL;
+ pass_nonnullable_vars = NIL;
+ pass_forced_null_vars = NIL;
+ }
+ reduce_outer_joins_pass2(j->larg, left_state, root,
+ pass_nonnullable_rels,
+ pass_nonnullable_vars,
+ pass_forced_null_vars);
+ }
+
+ if (right_state->contains_outer)
+ {
+ if (jointype != JOIN_FULL) /* ie, INNER/LEFT/SEMI/ANTI */
+ {
+ /* pass appropriate constraints, per comment above */
+ pass_nonnullable_rels = local_nonnullable_rels;
+ pass_nonnullable_vars = local_nonnullable_vars;
+ pass_forced_null_vars = local_forced_null_vars;
+ }
+ else
+ {
+ /* no constraints pass through JOIN_FULL */
+ pass_nonnullable_rels = NULL;
+ pass_nonnullable_vars = NIL;
+ pass_forced_null_vars = NIL;
+ }
+ reduce_outer_joins_pass2(j->rarg, right_state, root,
+ pass_nonnullable_rels,
+ pass_nonnullable_vars,
+ pass_forced_null_vars);
+ }
+ bms_free(local_nonnullable_rels);
+ }
+ }
+ else
+ elog(ERROR, "unrecognized node type: %d",
+ (int) nodeTag(jtnode));
+}
+
+
+/*
+ * remove_useless_result_rtes
+ * Attempt to remove RTE_RESULT RTEs from the join tree.
+ *
+ * We can remove RTE_RESULT entries from the join tree using the knowledge
+ * that RTE_RESULT returns exactly one row and has no output columns. Hence,
+ * if one is inner-joined to anything else, we can delete it. Optimizations
+ * are also possible for some outer-join cases, as detailed below.
+ *
+ * Some of these optimizations depend on recognizing empty (constant-true)
+ * quals for FromExprs and JoinExprs. That makes it useful to apply this
+ * optimization pass after expression preprocessing, since that will have
+ * eliminated constant-true quals, allowing more cases to be recognized as
+ * optimizable. What's more, the usual reason for an RTE_RESULT to be present
+ * is that we pulled up a subquery or VALUES clause, thus very possibly
+ * replacing Vars with constants, making it more likely that a qual can be
+ * reduced to constant true. Also, because some optimizations depend on
+ * the outer-join type, it's best to have done reduce_outer_joins() first.
+ *
+ * A PlaceHolderVar referencing an RTE_RESULT RTE poses an obstacle to this
+ * process: we must remove the RTE_RESULT's relid from the PHV's phrels, but
+ * we must not reduce the phrels set to empty. If that would happen, and
+ * the RTE_RESULT is an immediate child of an outer join, we have to give up
+ * and not remove the RTE_RESULT: there is noplace else to evaluate the
+ * PlaceHolderVar. (That is, in such cases the RTE_RESULT *does* have output
+ * columns.) But if the RTE_RESULT is an immediate child of an inner join,
+ * we can usually change the PlaceHolderVar's phrels so as to evaluate it at
+ * the inner join instead. This is OK because we really only care that PHVs
+ * are evaluated above or below the correct outer joins. We can't, however,
+ * postpone the evaluation of a PHV to above where it is used; so there are
+ * some checks below on whether output PHVs are laterally referenced in the
+ * other join input rel(s).
+ *
+ * We used to try to do this work as part of pull_up_subqueries() where the
+ * potentially-optimizable cases get introduced; but it's way simpler, and
+ * more effective, to do it separately.
+ */
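+/*
+ * For illustration (hypothetical table t): after pullup of a single-row
+ * VALUES, a query like
+ * SELECT t.x, v.c FROM t, (VALUES (42)) AS v(c)
+ * leaves an RTE_RESULT inner-joined to t; dropping it here reduces the
+ * query to a plain scan of t with the constant 42 standing in for v.c.
+ */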
+void
+remove_useless_result_rtes(PlannerInfo *root)
+{
+ ListCell *cell;
+
+ /* Top level of jointree must always be a FromExpr */
+ Assert(IsA(root->parse->jointree, FromExpr));
+ /* Recurse ... */
+ root->parse->jointree = (FromExpr *)
+ remove_useless_results_recurse(root, (Node *) root->parse->jointree);
+ /* We should still have a FromExpr */
+ Assert(IsA(root->parse->jointree, FromExpr));
+
+ /*
+ * Remove any PlanRowMark referencing an RTE_RESULT RTE. We obviously
+ * must do that for any RTE_RESULT that we just removed. But one for an
+ * RTE that we did not remove can be dropped anyway: since the RTE has
+ * only one possible output row, there is no need for EPQ to mark and
+ * restore that row.
+ *
+ * It's necessary, not optional, to remove the PlanRowMark for a surviving
+ * RTE_RESULT RTE; otherwise we'll generate a whole-row Var for the
+ * RTE_RESULT, which the executor has no support for.
+ */
+ foreach(cell, root->rowMarks)
+ {
+ PlanRowMark *rc = (PlanRowMark *) lfirst(cell);
+
+ if (rt_fetch(rc->rti, root->parse->rtable)->rtekind == RTE_RESULT)
+ root->rowMarks = foreach_delete_current(root->rowMarks, cell);
+ }
+}
+
+/*
+ * remove_useless_results_recurse
+ * Recursive guts of remove_useless_result_rtes.
+ *
+ * This recursively processes the jointree and returns a modified jointree.
+ */
+static Node *
+remove_useless_results_recurse(PlannerInfo *root, Node *jtnode)
+{
+ Assert(jtnode != NULL);
+ if (IsA(jtnode, RangeTblRef))
+ {
+ /* Can't immediately do anything with a RangeTblRef */
+ }
+ else if (IsA(jtnode, FromExpr))
+ {
+ FromExpr *f = (FromExpr *) jtnode;
+ Relids result_relids = NULL;
+ ListCell *cell;
+
+ /*
+ * We can drop RTE_RESULT rels from the fromlist so long as at least
+ * one child remains, since joining to a one-row table changes
+ * nothing. (But we can't drop a RTE_RESULT that computes PHV(s) that
+ * are needed by some sibling. The cleanup transformation below would
+ * reassign the PHVs to be computed at the join, which is too late for
+ * the sibling's use.) The easiest way to mechanize this rule is to
+ * modify the list in-place.
+ */
+ foreach(cell, f->fromlist)
+ {
+ Node *child = (Node *) lfirst(cell);
+ int varno;
+
+ /* Recursively transform child ... */
+ child = remove_useless_results_recurse(root, child);
+ /* ... and stick it back into the tree */
+ lfirst(cell) = child;
+
+ /*
+ * If it's an RTE_RESULT with at least one sibling, and no sibling
+ * references dependent PHVs, we can drop it. We don't yet know
+ * what the inner join's final relid set will be, so postpone
+ * cleanup of PHVs etc till after this loop.
+ */
+ if (list_length(f->fromlist) > 1 &&
+ (varno = get_result_relid(root, child)) != 0 &&
+ !find_dependent_phvs_in_jointree(root, (Node *) f, varno))
+ {
+ f->fromlist = foreach_delete_current(f->fromlist, cell);
+ result_relids = bms_add_member(result_relids, varno);
+ }
+ }
+
+ /*
+ * Clean up if we dropped any RTE_RESULT RTEs. This is a bit
+ * inefficient if there's more than one, but it seems better to
+ * optimize the support code for the single-relid case.
+ */
+ if (result_relids)
+ {
+ int varno = -1;
+
+ while ((varno = bms_next_member(result_relids, varno)) >= 0)
+ remove_result_refs(root, varno, (Node *) f);
+ }
+
+ /*
+ * If we're not at the top of the jointree, it's valid to simplify a
+ * degenerate FromExpr into its single child. (At the top, we must
+ * keep the FromExpr since Query.jointree is required to point to a
+ * FromExpr.)
+ */
+ if (f != root->parse->jointree &&
+ f->quals == NULL &&
+ list_length(f->fromlist) == 1)
+ return (Node *) linitial(f->fromlist);
+ }
+ else if (IsA(jtnode, JoinExpr))
+ {
+ JoinExpr *j = (JoinExpr *) jtnode;
+ int varno;
+
+ /* First, recurse */
+ j->larg = remove_useless_results_recurse(root, j->larg);
+ j->rarg = remove_useless_results_recurse(root, j->rarg);
+
+ /* Apply join-type-specific optimization rules */
+ switch (j->jointype)
+ {
+ case JOIN_INNER:
+
+ /*
+ * An inner join is equivalent to a FromExpr, so if either
+ * side was simplified to an RTE_RESULT rel, we can replace
+ * the join with a FromExpr with just the other side; and if
+ * the qual is empty (JOIN ON TRUE) then we can omit the
+ * FromExpr as well.
+ *
+ * Just as in the FromExpr case, we can't simplify if the
+ * other input rel references any PHVs that are marked as to
+ * be evaluated at the RTE_RESULT rel, because we can't
+ * postpone their evaluation in that case. But we only have
+ * to check this in cases where it's syntactically legal for
+ * the other input to have a LATERAL reference to the
+ * RTE_RESULT rel. Only RHSes of inner and left joins are
+ * allowed to have such refs.
+ */
+ if ((varno = get_result_relid(root, j->larg)) != 0 &&
+ !find_dependent_phvs_in_jointree(root, j->rarg, varno))
+ {
+ remove_result_refs(root, varno, j->rarg);
+ if (j->quals)
+ jtnode = (Node *)
+ makeFromExpr(list_make1(j->rarg), j->quals);
+ else
+ jtnode = j->rarg;
+ }
+ else if ((varno = get_result_relid(root, j->rarg)) != 0)
+ {
+ remove_result_refs(root, varno, j->larg);
+ if (j->quals)
+ jtnode = (Node *)
+ makeFromExpr(list_make1(j->larg), j->quals);
+ else
+ jtnode = j->larg;
+ }
+ break;
+ case JOIN_LEFT:
+
+ /*
+ * We can simplify this case if the RHS is an RTE_RESULT, with
+ * two different possibilities:
+ *
+ * If the qual is empty (JOIN ON TRUE), then the join can be
+ * strength-reduced to a plain inner join, since each LHS row
+ * necessarily has exactly one join partner. So we can always
+ * discard the RHS, much as in the JOIN_INNER case above.
+ * (Again, the LHS could not contain a lateral reference to
+ * the RHS.)
+ *
+ * Otherwise, it's still true that each LHS row should be
+ * returned exactly once, and since the RHS returns no columns
+ * (unless there are PHVs that have to be evaluated there), we
+ * don't much care if it's null-extended or not. So in this
+ * case also, we can just ignore the qual and discard the left
+ * join.
+ */
+ if ((varno = get_result_relid(root, j->rarg)) != 0 &&
+ (j->quals == NULL ||
+ !find_dependent_phvs(root, varno)))
+ {
+ remove_result_refs(root, varno, j->larg);
+ jtnode = j->larg;
+ }
+ break;
+ case JOIN_SEMI:
+
+ /*
+ * We may simplify this case if the RHS is an RTE_RESULT; the
+ * join qual becomes effectively just a filter qual for the
+ * LHS, since we should either return the LHS row or not. For
+ * simplicity we inject the filter qual into a new FromExpr.
+ *
+ * There is a fine point about PHVs that are supposed to be
+ * evaluated at the RHS. Such PHVs could only appear in the
+ * semijoin's qual, since the rest of the query cannot
+ * reference any outputs of the semijoin's RHS. Therefore,
+ * they can't actually go to null before being examined, and
+ * it'd be OK to just remove the PHV wrapping. We don't have
+ * infrastructure for that, but remove_result_refs() will
+ * relabel them as to be evaluated at the LHS, which is fine.
+ */
+ if ((varno = get_result_relid(root, j->rarg)) != 0)
+ {
+ remove_result_refs(root, varno, j->larg);
+ if (j->quals)
+ jtnode = (Node *)
+ makeFromExpr(list_make1(j->larg), j->quals);
+ else
+ jtnode = j->larg;
+ }
+ break;
+ case JOIN_FULL:
+ case JOIN_ANTI:
+ /* We have no special smarts for these cases */
+ break;
+ default:
+ /* Note: JOIN_RIGHT should be gone at this point */
+ elog(ERROR, "unrecognized join type: %d",
+ (int) j->jointype);
+ break;
+ }
+ }
+ else
+ elog(ERROR, "unrecognized node type: %d",
+ (int) nodeTag(jtnode));
+ return jtnode;
+}
+
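+/*
+ * Illustrative example (hypothetical query, for intuition only): in
+ *
+ *     SELECT t.* FROM t LEFT JOIN (SELECT 1 AS x) ss ON true;
+ *
+ * the FROM-less subquery "ss" is pulled up earlier and leaves behind an
+ * RTE_RESULT RTE.  Since the ON condition reduces to empty, the JOIN_LEFT
+ * rule above strength-reduces and discards the RHS, so the surviving
+ * jointree references only "t".  The exact cases handled are those coded
+ * in the switch above.
+ */
+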
+/*
+ * get_result_relid
+ * If jtnode is a RangeTblRef for an RTE_RESULT RTE, return its relid;
+ * otherwise return 0.
+ */
+static int
+get_result_relid(PlannerInfo *root, Node *jtnode)
+{
+ int varno;
+
+ if (!IsA(jtnode, RangeTblRef))
+ return 0;
+ varno = ((RangeTblRef *) jtnode)->rtindex;
+ if (rt_fetch(varno, root->parse->rtable)->rtekind != RTE_RESULT)
+ return 0;
+ return varno;
+}
+
+/*
+ * remove_result_refs
+ * Helper routine for dropping an unneeded RTE_RESULT RTE.
+ *
+ * This doesn't physically remove the RTE from the jointree, because that's
+ * more easily handled in remove_useless_results_recurse. What it does do
+ * is the necessary cleanup in the rest of the tree: we must adjust any PHVs
+ * that may reference the RTE. Be sure to call this at a point where the
+ * jointree is valid (no disconnected nodes).
+ *
+ * Note that RTEs referenced directly in the jointree won't be appendrel
+ * members, so no AppendRelInfo child_relid needs renumbering here; we still
+ * run fix_append_rel_relids() so that any PlaceHolderVars hidden in
+ * translated_vars lists get their relid sets adjusted.
+ *
+ * varno is the RTE_RESULT's relid.
+ * newjtloc is the jointree location at which any PHVs referencing the
+ * RTE_RESULT should be evaluated instead.
+ */
+static void
+remove_result_refs(PlannerInfo *root, int varno, Node *newjtloc)
+{
+ /* Fix up PlaceHolderVars as needed */
+ /* If there are no PHVs anywhere, we can skip this bit */
+ if (root->glob->lastPHId != 0)
+ {
+ Relids subrelids;
+
+ subrelids = get_relids_in_jointree(newjtloc, false);
+ Assert(!bms_is_empty(subrelids));
+ substitute_phv_relids((Node *) root->parse, varno, subrelids);
+ fix_append_rel_relids(root->append_rel_list, varno, subrelids);
+ }
+
+ /*
+ * We also need to remove any PlanRowMark referencing the RTE, but we
+ * postpone that work until we return to remove_useless_result_rtes.
+ */
+}
+
+
+/*
+ * find_dependent_phvs - are there any PlaceHolderVars whose relids are
+ * exactly the given varno?
+ *
+ * find_dependent_phvs should be used when we want to see if there are
+ * any such PHVs anywhere in the Query. Another use-case is to see if
+ * a subtree of the join tree contains such PHVs; but for that, we have
+ * to look not only at the join tree nodes themselves but at the
+ * referenced RTEs. For that, use find_dependent_phvs_in_jointree.
+ */
+
+typedef struct
+{
+ Relids relids;
+ int sublevels_up;
+} find_dependent_phvs_context;
+
+static bool
+find_dependent_phvs_walker(Node *node,
+ find_dependent_phvs_context *context)
+{
+ if (node == NULL)
+ return false;
+ if (IsA(node, PlaceHolderVar))
+ {
+ PlaceHolderVar *phv = (PlaceHolderVar *) node;
+
+ if (phv->phlevelsup == context->sublevels_up &&
+ bms_equal(context->relids, phv->phrels))
+ return true;
+ /* fall through to examine children */
+ }
+ if (IsA(node, Query))
+ {
+ /* Recurse into subselects */
+ bool result;
+
+ context->sublevels_up++;
+ result = query_tree_walker((Query *) node,
+ find_dependent_phvs_walker,
+ (void *) context, 0);
+ context->sublevels_up--;
+ return result;
+ }
+ /* Shouldn't need to handle planner auxiliary nodes here */
+ Assert(!IsA(node, SpecialJoinInfo));
+ Assert(!IsA(node, AppendRelInfo));
+ Assert(!IsA(node, PlaceHolderInfo));
+ Assert(!IsA(node, MinMaxAggInfo));
+
+ return expression_tree_walker(node, find_dependent_phvs_walker,
+ (void *) context);
+}
+
+static bool
+find_dependent_phvs(PlannerInfo *root, int varno)
+{
+ find_dependent_phvs_context context;
+
+ /* If there are no PHVs anywhere, we needn't work hard */
+ if (root->glob->lastPHId == 0)
+ return false;
+
+ context.relids = bms_make_singleton(varno);
+ context.sublevels_up = 0;
+
+ return query_tree_walker(root->parse,
+ find_dependent_phvs_walker,
+ (void *) &context,
+ 0);
+}
+
+static bool
+find_dependent_phvs_in_jointree(PlannerInfo *root, Node *node, int varno)
+{
+ find_dependent_phvs_context context;
+ Relids subrelids;
+ int relid;
+
+ /* If there are no PHVs anywhere, we needn't work hard */
+ if (root->glob->lastPHId == 0)
+ return false;
+
+ context.relids = bms_make_singleton(varno);
+ context.sublevels_up = 0;
+
+ /*
+ * See if the jointree fragment itself contains references (in join quals)
+ */
+ if (find_dependent_phvs_walker(node, &context))
+ return true;
+
+ /*
+ * Otherwise, identify the set of referenced RTEs (we can ignore joins,
+ * since they should be flattened already, so their join alias lists no
+ * longer matter), and tediously check each RTE. We can ignore RTEs that
+ * are not marked LATERAL, though, since they couldn't possibly contain
+ * any cross-references to other RTEs.
+ */
+ subrelids = get_relids_in_jointree(node, false);
+ relid = -1;
+ while ((relid = bms_next_member(subrelids, relid)) >= 0)
+ {
+ RangeTblEntry *rte = rt_fetch(relid, root->parse->rtable);
+
+ if (rte->lateral &&
+ range_table_entry_walker(rte,
+ find_dependent_phvs_walker,
+ (void *) &context,
+ 0))
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * substitute_phv_relids - adjust PlaceHolderVar relid sets after pulling up
+ * a subquery or removing an RTE_RESULT jointree item
+ *
+ * Find any PlaceHolderVar nodes in the given tree that reference the
+ * pulled-up relid, and change them to reference the replacement relid(s).
+ *
+ * NOTE: although this has the form of a walker, we cheat and modify the
+ * nodes in-place. This should be OK since the tree was copied by
+ * pullup_replace_vars earlier. Avoid scribbling on the original values of
+ * the bitmapsets, though, because expression_tree_mutator doesn't copy those.
+ */
+
+typedef struct
+{
+ int varno;
+ int sublevels_up;
+ Relids subrelids;
+} substitute_phv_relids_context;
+
+static bool
+substitute_phv_relids_walker(Node *node,
+ substitute_phv_relids_context *context)
+{
+ if (node == NULL)
+ return false;
+ if (IsA(node, PlaceHolderVar))
+ {
+ PlaceHolderVar *phv = (PlaceHolderVar *) node;
+
+ if (phv->phlevelsup == context->sublevels_up &&
+ bms_is_member(context->varno, phv->phrels))
+ {
+ phv->phrels = bms_union(phv->phrels,
+ context->subrelids);
+ phv->phrels = bms_del_member(phv->phrels,
+ context->varno);
+ /* Assert we haven't broken the PHV */
+ Assert(!bms_is_empty(phv->phrels));
+ }
+ /* fall through to examine children */
+ }
+ if (IsA(node, Query))
+ {
+ /* Recurse into subselects */
+ bool result;
+
+ context->sublevels_up++;
+ result = query_tree_walker((Query *) node,
+ substitute_phv_relids_walker,
+ (void *) context, 0);
+ context->sublevels_up--;
+ return result;
+ }
+ /* Shouldn't need to handle planner auxiliary nodes here */
+ Assert(!IsA(node, SpecialJoinInfo));
+ Assert(!IsA(node, AppendRelInfo));
+ Assert(!IsA(node, PlaceHolderInfo));
+ Assert(!IsA(node, MinMaxAggInfo));
+
+ return expression_tree_walker(node, substitute_phv_relids_walker,
+ (void *) context);
+}
+
+static void
+substitute_phv_relids(Node *node, int varno, Relids subrelids)
+{
+ substitute_phv_relids_context context;
+
+ context.varno = varno;
+ context.sublevels_up = 0;
+ context.subrelids = subrelids;
+
+ /*
+ * Must be prepared to start with a Query or a bare expression tree.
+ */
+ query_or_expression_tree_walker(node,
+ substitute_phv_relids_walker,
+ (void *) &context,
+ 0);
+}
+
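+/*
+ * Worked example (illustrative): suppose subquery RTE 3 was pulled up and
+ * its body now spans relids {5, 6}.  A PlaceHolderVar with phrels = {3, 4}
+ * is rewritten by the walker above to phrels = {4, 5, 6}: the replacement
+ * set is unioned in and then relid 3 is deleted.
+ */
+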
+/*
+ * fix_append_rel_relids: update RT-index fields of AppendRelInfo nodes
+ *
+ * When we pull up a subquery, any AppendRelInfo references to the subquery's
+ * RT index have to be replaced by the substituted relid (and there had better
+ * be only one). We also need to apply substitute_phv_relids to their
+ * translated_vars lists, since those might contain PlaceHolderVars.
+ *
+ * We assume we may modify the AppendRelInfo nodes in-place.
+ */
+static void
+fix_append_rel_relids(List *append_rel_list, int varno, Relids subrelids)
+{
+ ListCell *l;
+ int subvarno = -1;
+
+ /*
+ * We only want to extract the member relid once, but we mustn't fail
+ * immediately if there are multiple members; it could be that none of the
+ * AppendRelInfo nodes refer to it. So compute it on first use. Note that
+ * bms_singleton_member will complain if set is not singleton.
+ */
+ foreach(l, append_rel_list)
+ {
+ AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(l);
+
+ /* The parent_relid shouldn't ever be a pullup target */
+ Assert(appinfo->parent_relid != varno);
+
+ if (appinfo->child_relid == varno)
+ {
+ if (subvarno < 0)
+ subvarno = bms_singleton_member(subrelids);
+ appinfo->child_relid = subvarno;
+ }
+
+ /* Also fix up any PHVs in its translated vars */
+ substitute_phv_relids((Node *) appinfo->translated_vars,
+ varno, subrelids);
+ }
+}
+
+/*
+ * get_relids_in_jointree: get set of RT indexes present in a jointree
+ *
+ * If include_joins is true, join RT indexes are included; if false,
+ * only base rels are included.
+ */
+Relids
+get_relids_in_jointree(Node *jtnode, bool include_joins)
+{
+ Relids result = NULL;
+
+ if (jtnode == NULL)
+ return result;
+ if (IsA(jtnode, RangeTblRef))
+ {
+ int varno = ((RangeTblRef *) jtnode)->rtindex;
+
+ result = bms_make_singleton(varno);
+ }
+ else if (IsA(jtnode, FromExpr))
+ {
+ FromExpr *f = (FromExpr *) jtnode;
+ ListCell *l;
+
+ foreach(l, f->fromlist)
+ {
+ result = bms_join(result,
+ get_relids_in_jointree(lfirst(l),
+ include_joins));
+ }
+ }
+ else if (IsA(jtnode, JoinExpr))
+ {
+ JoinExpr *j = (JoinExpr *) jtnode;
+
+ result = get_relids_in_jointree(j->larg, include_joins);
+ result = bms_join(result,
+ get_relids_in_jointree(j->rarg, include_joins));
+ if (include_joins && j->rtindex)
+ result = bms_add_member(result, j->rtindex);
+ }
+ else
+ elog(ERROR, "unrecognized node type: %d",
+ (int) nodeTag(jtnode));
+ return result;
+}
+
+/*
+ * get_relids_for_join: get set of base RT indexes making up a join
+ */
+Relids
+get_relids_for_join(Query *query, int joinrelid)
+{
+ Node *jtnode;
+
+ jtnode = find_jointree_node_for_rel((Node *) query->jointree,
+ joinrelid);
+ if (!jtnode)
+ elog(ERROR, "could not find join node %d", joinrelid);
+ return get_relids_in_jointree(jtnode, false);
+}
+
+/*
+ * find_jointree_node_for_rel: locate jointree node for a base or join RT index
+ *
+ * Returns NULL if not found
+ */
+static Node *
+find_jointree_node_for_rel(Node *jtnode, int relid)
+{
+ if (jtnode == NULL)
+ return NULL;
+ if (IsA(jtnode, RangeTblRef))
+ {
+ int varno = ((RangeTblRef *) jtnode)->rtindex;
+
+ if (relid == varno)
+ return jtnode;
+ }
+ else if (IsA(jtnode, FromExpr))
+ {
+ FromExpr *f = (FromExpr *) jtnode;
+ ListCell *l;
+
+ foreach(l, f->fromlist)
+ {
+ jtnode = find_jointree_node_for_rel(lfirst(l), relid);
+ if (jtnode)
+ return jtnode;
+ }
+ }
+ else if (IsA(jtnode, JoinExpr))
+ {
+ JoinExpr *j = (JoinExpr *) jtnode;
+
+ if (relid == j->rtindex)
+ return jtnode;
+ jtnode = find_jointree_node_for_rel(j->larg, relid);
+ if (jtnode)
+ return jtnode;
+ jtnode = find_jointree_node_for_rel(j->rarg, relid);
+ if (jtnode)
+ return jtnode;
+ }
+ else
+ elog(ERROR, "unrecognized node type: %d",
+ (int) nodeTag(jtnode));
+ return NULL;
+}
diff --git a/src/backend/optimizer/prep/prepqual.c b/src/backend/optimizer/prep/prepqual.c
new file mode 100644
index 0000000..da01234
--- /dev/null
+++ b/src/backend/optimizer/prep/prepqual.c
@@ -0,0 +1,677 @@
+/*-------------------------------------------------------------------------
+ *
+ * prepqual.c
+ * Routines for preprocessing qualification expressions
+ *
+ *
+ * While the parser will produce flattened (N-argument) AND/OR trees from
+ * simple sequences of AND'ed or OR'ed clauses, there might be an AND clause
+ * directly underneath another AND, or OR underneath OR, if the input was
+ * oddly parenthesized. Also, rule expansion and subquery flattening could
+ * produce such parsetrees. The planner wants to flatten all such cases
+ * to ensure consistent optimization behavior.
+ *
+ * Formerly, this module was responsible for doing the initial flattening,
+ * but now we leave it to eval_const_expressions to do that since it has to
+ * make a complete pass over the expression tree anyway. Instead, we just
+ * have to ensure that our manipulations preserve AND/OR flatness.
+ * pull_ands() and pull_ors() are used to maintain flatness of the AND/OR
+ * tree after local transformations that might introduce nested AND/ORs.
+ *
+ *
+ * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ * src/backend/optimizer/prep/prepqual.c
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include "nodes/makefuncs.h"
+#include "nodes/nodeFuncs.h"
+#include "optimizer/optimizer.h"
+#include "optimizer/prep.h"
+#include "utils/lsyscache.h"
+
+
+static List *pull_ands(List *andlist);
+static List *pull_ors(List *orlist);
+static Expr *find_duplicate_ors(Expr *qual, bool is_check);
+static Expr *process_duplicate_ors(List *orlist);
+
+
+/*
+ * negate_clause
+ * Negate a Boolean expression.
+ *
+ * Input is a clause to be negated (e.g., the argument of a NOT clause).
+ * Returns a new clause equivalent to the negation of the given clause.
+ *
+ * Although this can be invoked on its own, it's mainly intended as a helper
+ * for eval_const_expressions(), and that context drives several design
+ * decisions. In particular, if the input is already AND/OR flat, we must
+ * preserve that property. We also don't bother to recurse in situations
+ * where we can assume that lower-level executions of eval_const_expressions
+ * would already have simplified sub-clauses of the input.
+ *
+ * The difference between this and a simple make_notclause() is that this
+ * tries to get rid of the NOT node by logical simplification. It's clearly
+ * always a win if the NOT node can be eliminated altogether. However, our
+ * use of DeMorgan's laws could result in having more NOT nodes rather than
+ * fewer. We do that unconditionally anyway, because in WHERE clauses it's
+ * important to expose as much top-level AND/OR structure as possible.
+ * Also, eliminating an intermediate NOT may allow us to flatten two levels
+ * of AND or OR together that we couldn't have otherwise. Finally, one of
+ * the motivations for doing this is to ensure that logically equivalent
+ * expressions will be seen as physically equal(), so we should always apply
+ * the same transformations.
+ */
+Node *
+negate_clause(Node *node)
+{
+ if (node == NULL) /* should not happen */
+ elog(ERROR, "can't negate an empty subexpression");
+ switch (nodeTag(node))
+ {
+ case T_Const:
+ {
+ Const *c = (Const *) node;
+
+ /* NOT NULL is still NULL */
+ if (c->constisnull)
+ return makeBoolConst(false, true);
+ /* otherwise pretty easy */
+ return makeBoolConst(!DatumGetBool(c->constvalue), false);
+ }
+ break;
+ case T_OpExpr:
+ {
+ /*
+ * Negate operator if possible: (NOT (< A B)) => (>= A B)
+ */
+ OpExpr *opexpr = (OpExpr *) node;
+ Oid negator = get_negator(opexpr->opno);
+
+ if (negator)
+ {
+ OpExpr *newopexpr = makeNode(OpExpr);
+
+ newopexpr->opno = negator;
+ newopexpr->opfuncid = InvalidOid;
+ newopexpr->opresulttype = opexpr->opresulttype;
+ newopexpr->opretset = opexpr->opretset;
+ newopexpr->opcollid = opexpr->opcollid;
+ newopexpr->inputcollid = opexpr->inputcollid;
+ newopexpr->args = opexpr->args;
+ newopexpr->location = opexpr->location;
+ return (Node *) newopexpr;
+ }
+ }
+ break;
+ case T_ScalarArrayOpExpr:
+ {
+ /*
+ * Negate a ScalarArrayOpExpr if its operator has a negator;
+ * for example x = ANY (list) becomes x <> ALL (list)
+ */
+ ScalarArrayOpExpr *saopexpr = (ScalarArrayOpExpr *) node;
+ Oid negator = get_negator(saopexpr->opno);
+
+ if (negator)
+ {
+ ScalarArrayOpExpr *newopexpr = makeNode(ScalarArrayOpExpr);
+
+ newopexpr->opno = negator;
+ newopexpr->opfuncid = InvalidOid;
+ newopexpr->hashfuncid = InvalidOid;
+ newopexpr->negfuncid = InvalidOid;
+ newopexpr->useOr = !saopexpr->useOr;
+ newopexpr->inputcollid = saopexpr->inputcollid;
+ newopexpr->args = saopexpr->args;
+ newopexpr->location = saopexpr->location;
+ return (Node *) newopexpr;
+ }
+ }
+ break;
+ case T_BoolExpr:
+ {
+ BoolExpr *expr = (BoolExpr *) node;
+
+ switch (expr->boolop)
+ {
+ /*--------------------
+ * Apply DeMorgan's Laws:
+ * (NOT (AND A B)) => (OR (NOT A) (NOT B))
+ * (NOT (OR A B)) => (AND (NOT A) (NOT B))
+ * i.e., swap AND for OR and negate each subclause.
+ *
+ * If the input is already AND/OR flat and has no NOT
+ * directly above AND or OR, this transformation preserves
+ * those properties. For example, if no direct child of
+ * the given AND clause is an AND or a NOT-above-OR, then
+ * the recursive calls of negate_clause() can't return any
+ * OR clauses. So we needn't call pull_ors() before
+ * building a new OR clause. Similarly for the OR case.
+ *--------------------
+ */
+ case AND_EXPR:
+ {
+ List *nargs = NIL;
+ ListCell *lc;
+
+ foreach(lc, expr->args)
+ {
+ nargs = lappend(nargs,
+ negate_clause(lfirst(lc)));
+ }
+ return (Node *) make_orclause(nargs);
+ }
+ break;
+ case OR_EXPR:
+ {
+ List *nargs = NIL;
+ ListCell *lc;
+
+ foreach(lc, expr->args)
+ {
+ nargs = lappend(nargs,
+ negate_clause(lfirst(lc)));
+ }
+ return (Node *) make_andclause(nargs);
+ }
+ break;
+ case NOT_EXPR:
+
+ /*
+ * NOT underneath NOT: they cancel. We assume the
+ * input is already simplified, so no need to recurse.
+ */
+ return (Node *) linitial(expr->args);
+ default:
+ elog(ERROR, "unrecognized boolop: %d",
+ (int) expr->boolop);
+ break;
+ }
+ }
+ break;
+ case T_NullTest:
+ {
+ NullTest *expr = (NullTest *) node;
+
+ /*
+ * In the rowtype case, the two flavors of NullTest are *not*
+ * logical inverses, so we can't simplify. But it does work
+ * for scalar datatypes.
+ */
+ if (!expr->argisrow)
+ {
+ NullTest *newexpr = makeNode(NullTest);
+
+ newexpr->arg = expr->arg;
+ newexpr->nulltesttype = (expr->nulltesttype == IS_NULL ?
+ IS_NOT_NULL : IS_NULL);
+ newexpr->argisrow = expr->argisrow;
+ newexpr->location = expr->location;
+ return (Node *) newexpr;
+ }
+ }
+ break;
+ case T_BooleanTest:
+ {
+ BooleanTest *expr = (BooleanTest *) node;
+ BooleanTest *newexpr = makeNode(BooleanTest);
+
+ newexpr->arg = expr->arg;
+ switch (expr->booltesttype)
+ {
+ case IS_TRUE:
+ newexpr->booltesttype = IS_NOT_TRUE;
+ break;
+ case IS_NOT_TRUE:
+ newexpr->booltesttype = IS_TRUE;
+ break;
+ case IS_FALSE:
+ newexpr->booltesttype = IS_NOT_FALSE;
+ break;
+ case IS_NOT_FALSE:
+ newexpr->booltesttype = IS_FALSE;
+ break;
+ case IS_UNKNOWN:
+ newexpr->booltesttype = IS_NOT_UNKNOWN;
+ break;
+ case IS_NOT_UNKNOWN:
+ newexpr->booltesttype = IS_UNKNOWN;
+ break;
+ default:
+ elog(ERROR, "unrecognized booltesttype: %d",
+ (int) expr->booltesttype);
+ break;
+ }
+ newexpr->location = expr->location;
+ return (Node *) newexpr;
+ }
+ break;
+ default:
+ /* else fall through */
+ break;
+ }
+
+ /*
+ * Otherwise we don't know how to simplify this, so just tack on an
+ * explicit NOT node.
+ */
+ return (Node *) make_notclause((Expr *) node);
+}
+
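+/*
+ * Worked example (illustrative, assuming the operators involved have
+ * negators defined, as the standard comparison operators do):
+ * negate_clause applied to
+ *     (a > 1 AND b IS NULL)
+ * yields
+ *     (a <= 1 OR b IS NOT NULL)
+ * via the DeMorgan rule for AND plus the OpExpr-negator and NullTest rules
+ * above.  A clause we cannot simplify, such as a call of an arbitrary
+ * boolean function, just gets an explicit NOT wrapped around it.
+ */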
+
+/*
+ * canonicalize_qual
+ * Convert a qualification expression to the most useful form.
+ *
+ * This is primarily intended to be used on top-level WHERE (or JOIN/ON)
+ * clauses. It can also be used on top-level CHECK constraints, for which
+ * pass is_check = true. DO NOT call it on any expression that is not known
+ * to be one or the other, as it might apply inappropriate simplifications.
+ *
+ * The name of this routine is a holdover from a time when it would try to
+ * force the expression into canonical AND-of-ORs or OR-of-ANDs form.
+ * Eventually, we recognized that that had more theoretical purity than
+ * actual usefulness, and so now the transformation doesn't involve any
+ * notion of reaching a canonical form.
+ *
+ * NOTE: we assume the input has already been through eval_const_expressions
+ * and therefore possesses AND/OR flatness. Formerly this function included
+ * its own flattening logic, but that requires a useless extra pass over the
+ * tree.
+ *
+ * Returns the modified qualification.
+ */
+Expr *
+canonicalize_qual(Expr *qual, bool is_check)
+{
+ Expr *newqual;
+
+ /* Quick exit for empty qual */
+ if (qual == NULL)
+ return NULL;
+
+ /* This should not be invoked on quals in implicit-AND format */
+ Assert(!IsA(qual, List));
+
+ /*
+ * Pull up redundant subclauses in OR-of-AND trees. We do this only
+ * within the top-level AND/OR structure; there's no point in looking
+ * deeper. Also remove any NULL constants in the top-level structure.
+ */
+ newqual = find_duplicate_ors(qual, is_check);
+
+ return newqual;
+}
+
+
+/*
+ * pull_ands
+ * Recursively flatten nested AND clauses into a single and-clause list.
+ *
+ * Input is the arglist of an AND clause.
+ * Returns the rebuilt arglist (note original list structure is not touched).
+ */
+static List *
+pull_ands(List *andlist)
+{
+ List *out_list = NIL;
+ ListCell *arg;
+
+ foreach(arg, andlist)
+ {
+ Node *subexpr = (Node *) lfirst(arg);
+
+ if (is_andclause(subexpr))
+ out_list = list_concat(out_list,
+ pull_ands(((BoolExpr *) subexpr)->args));
+ else
+ out_list = lappend(out_list, subexpr);
+ }
+ return out_list;
+}
+
+/*
+ * pull_ors
+ * Recursively flatten nested OR clauses into a single or-clause list.
+ *
+ * Input is the arglist of an OR clause.
+ * Returns the rebuilt arglist (note original list structure is not touched).
+ */
+static List *
+pull_ors(List *orlist)
+{
+ List *out_list = NIL;
+ ListCell *arg;
+
+ foreach(arg, orlist)
+ {
+ Node *subexpr = (Node *) lfirst(arg);
+
+ if (is_orclause(subexpr))
+ out_list = list_concat(out_list,
+ pull_ors(((BoolExpr *) subexpr)->args));
+ else
+ out_list = lappend(out_list, subexpr);
+ }
+ return out_list;
+}
+
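+/*
+ * For example (illustrative): given the arglist of
+ *     (A OR (B OR (C OR D)) OR E)
+ * pull_ors returns the flat list (A, B, C, D, E); pull_ands does the same
+ * for nested ANDs.  Only AND-under-AND and OR-under-OR are flattened;
+ * mixed AND/OR structure is never rearranged.
+ */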
+
+/*--------------------
+ * The following code attempts to apply the inverse OR distributive law:
+ * ((A AND B) OR (A AND C)) => (A AND (B OR C))
+ * That is, locate OR clauses in which every subclause contains an
+ * identical term, and pull out the duplicated terms.
+ *
+ * This may seem like a fairly useless activity, but it turns out to be
+ * applicable to many machine-generated queries, and there are also queries
+ * in some of the TPC benchmarks that need it. This was in fact almost the
+ * sole useful side-effect of the old prepqual code that tried to force
+ * the query into canonical AND-of-ORs form: the canonical equivalent of
+ * ((A AND B) OR (A AND C))
+ * is
+ * ((A OR A) AND (A OR C) AND (B OR A) AND (B OR C))
+ * which the code was able to simplify to
+ * (A AND (A OR C) AND (B OR A) AND (B OR C))
+ * thus successfully extracting the common condition A --- but at the cost
+ * of cluttering the qual with many redundant clauses.
+ *--------------------
+ */
+
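+/*
+ * Illustrative example: a WHERE clause written as
+ *     (x = 1 AND y = 2) OR (x = 1 AND z = 3)
+ * is rewritten by the code below to
+ *     x = 1 AND (y = 2 OR z = 3)
+ * exposing "x = 1" as a top-level restriction clause, where it is far more
+ * useful to the rest of the planner (for example, for index selection).
+ */
+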
+/*
+ * find_duplicate_ors
+ * Given a qualification tree with the NOTs pushed down, search for
+ * OR clauses to which the inverse OR distributive law might apply.
+ * Only the top-level AND/OR structure is searched.
+ *
+ * While at it, we remove any NULL constants within the top-level AND/OR
+ * structure, eg in a WHERE clause, "x OR NULL::boolean" is reduced to "x".
+ * In general that would change the result, so eval_const_expressions can't
+ * do it; but at top level of WHERE, we don't need to distinguish between
+ * FALSE and NULL results, so it's valid to treat NULL::boolean the same
+ * as FALSE and then simplify AND/OR accordingly. Conversely, in a top-level
+ * CHECK constraint, we may treat a NULL the same as TRUE.
+ *
+ * Returns the modified qualification. AND/OR flatness is preserved.
+ */
+static Expr *
+find_duplicate_ors(Expr *qual, bool is_check)
+{
+ if (is_orclause(qual))
+ {
+ List *orlist = NIL;
+ ListCell *temp;
+
+ /* Recurse */
+ foreach(temp, ((BoolExpr *) qual)->args)
+ {
+ Expr *arg = (Expr *) lfirst(temp);
+
+ arg = find_duplicate_ors(arg, is_check);
+
+ /* Get rid of any constant inputs */
+ if (arg && IsA(arg, Const))
+ {
+ Const *carg = (Const *) arg;
+
+ if (is_check)
+ {
+ /* Within OR in CHECK, drop constant FALSE */
+ if (!carg->constisnull && !DatumGetBool(carg->constvalue))
+ continue;
+ /* Constant TRUE or NULL, so OR reduces to TRUE */
+ return (Expr *) makeBoolConst(true, false);
+ }
+ else
+ {
+ /* Within OR in WHERE, drop constant FALSE or NULL */
+ if (carg->constisnull || !DatumGetBool(carg->constvalue))
+ continue;
+ /* Constant TRUE, so OR reduces to TRUE */
+ return arg;
+ }
+ }
+
+ orlist = lappend(orlist, arg);
+ }
+
+ /* Flatten any ORs pulled up to just below here */
+ orlist = pull_ors(orlist);
+
+ /* Now we can look for duplicate ORs */
+ return process_duplicate_ors(orlist);
+ }
+ else if (is_andclause(qual))
+ {
+ List *andlist = NIL;
+ ListCell *temp;
+
+ /* Recurse */
+ foreach(temp, ((BoolExpr *) qual)->args)
+ {
+ Expr *arg = (Expr *) lfirst(temp);
+
+ arg = find_duplicate_ors(arg, is_check);
+
+ /* Get rid of any constant inputs */
+ if (arg && IsA(arg, Const))
+ {
+ Const *carg = (Const *) arg;
+
+ if (is_check)
+ {
+ /* Within AND in CHECK, drop constant TRUE or NULL */
+ if (carg->constisnull || DatumGetBool(carg->constvalue))
+ continue;
+ /* Constant FALSE, so AND reduces to FALSE */
+ return arg;
+ }
+ else
+ {
+ /* Within AND in WHERE, drop constant TRUE */
+ if (!carg->constisnull && DatumGetBool(carg->constvalue))
+ continue;
+ /* Constant FALSE or NULL, so AND reduces to FALSE */
+ return (Expr *) makeBoolConst(false, false);
+ }
+ }
+
+ andlist = lappend(andlist, arg);
+ }
+
+ /* Flatten any ANDs introduced just below here */
+ andlist = pull_ands(andlist);
+
+ /* AND of no inputs reduces to TRUE */
+ if (andlist == NIL)
+ return (Expr *) makeBoolConst(true, false);
+
+ /* Single-expression AND just reduces to that expression */
+ if (list_length(andlist) == 1)
+ return (Expr *) linitial(andlist);
+
+ /* Else we still need an AND node */
+ return make_andclause(andlist);
+ }
+ else
+ return qual;
+}
+
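+/*
+ * Examples of the constant handling above (illustrative): at the top level
+ * of a WHERE clause, "x OR NULL::boolean" reduces to just "x" and
+ * "x AND NULL::boolean" reduces to constant FALSE, since a NULL result is
+ * treated like FALSE there.  In a CHECK constraint the rules flip:
+ * "x OR NULL::boolean" reduces to constant TRUE, because a NULL CHECK
+ * result lets the row through just as TRUE does.
+ */
+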
+/*
+ * process_duplicate_ors
+ * Given a list of exprs which are ORed together, try to apply
+ * the inverse OR distributive law.
+ *
+ * Returns the resulting expression (could be an AND clause, an OR
+ * clause, or maybe even a single subexpression).
+ */
+static Expr *
+process_duplicate_ors(List *orlist)
+{
+ List *reference = NIL;
+ int num_subclauses = 0;
+ List *winners;
+ List *neworlist;
+ ListCell *temp;
+
+ /* OR of no inputs reduces to FALSE */
+ if (orlist == NIL)
+ return (Expr *) makeBoolConst(false, false);
+
+ /* Single-expression OR just reduces to that expression */
+ if (list_length(orlist) == 1)
+ return (Expr *) linitial(orlist);
+
+ /*
+ * Choose the shortest AND clause as the reference list --- obviously, any
+ * subclause not in this clause isn't in all the clauses. If we find a
+ * clause that's not an AND, we can treat it as a one-element AND clause,
+ * which necessarily wins as shortest.
+ */
+ foreach(temp, orlist)
+ {
+ Expr *clause = (Expr *) lfirst(temp);
+
+ if (is_andclause(clause))
+ {
+ List *subclauses = ((BoolExpr *) clause)->args;
+ int nclauses = list_length(subclauses);
+
+ if (reference == NIL || nclauses < num_subclauses)
+ {
+ reference = subclauses;
+ num_subclauses = nclauses;
+ }
+ }
+ else
+ {
+ reference = list_make1(clause);
+ break;
+ }
+ }
+
+ /*
+ * Just in case, eliminate any duplicates in the reference list.
+ */
+ reference = list_union(NIL, reference);
+
+ /*
+ * Check each element of the reference list to see if it's in all the OR
+ * clauses. Build a new list of winning clauses.
+ */
+ winners = NIL;
+ foreach(temp, reference)
+ {
+ Expr *refclause = (Expr *) lfirst(temp);
+ bool win = true;
+ ListCell *temp2;
+
+ foreach(temp2, orlist)
+ {
+ Expr *clause = (Expr *) lfirst(temp2);
+
+ if (is_andclause(clause))
+ {
+ if (!list_member(((BoolExpr *) clause)->args, refclause))
+ {
+ win = false;
+ break;
+ }
+ }
+ else
+ {
+ if (!equal(refclause, clause))
+ {
+ win = false;
+ break;
+ }
+ }
+ }
+
+ if (win)
+ winners = lappend(winners, refclause);
+ }
+
+ /*
+ * If no winners, we can't transform the OR
+ */
+ if (winners == NIL)
+ return make_orclause(orlist);
+
+ /*
+ * Generate new OR list consisting of the remaining sub-clauses.
+ *
+ * If any clause degenerates to empty, then we have a situation like (A
+ * AND B) OR (A), which can be reduced to just A --- that is, the
+ * additional conditions in other arms of the OR are irrelevant.
+ *
+ * Note that because we use list_difference, any multiple occurrences of a
+ * winning clause in an AND sub-clause will be removed automatically.
+ */
+ neworlist = NIL;
+ foreach(temp, orlist)
+ {
+ Expr *clause = (Expr *) lfirst(temp);
+
+ if (is_andclause(clause))
+ {
+ List *subclauses = ((BoolExpr *) clause)->args;
+
+ subclauses = list_difference(subclauses, winners);
+ if (subclauses != NIL)
+ {
+ if (list_length(subclauses) == 1)
+ neworlist = lappend(neworlist, linitial(subclauses));
+ else
+ neworlist = lappend(neworlist, make_andclause(subclauses));
+ }
+ else
+ {
+ neworlist = NIL; /* degenerate case, see above */
+ break;
+ }
+ }
+ else
+ {
+ if (!list_member(winners, clause))
+ neworlist = lappend(neworlist, clause);
+ else
+ {
+ neworlist = NIL; /* degenerate case, see above */
+ break;
+ }
+ }
+ }
+
+ /*
+ * Append reduced OR to the winners list, if it's not degenerate, handling
+ * the special case of one element correctly (can that really happen?).
+ * Also be careful to maintain AND/OR flatness in case we pulled up a
+ * sub-sub-OR-clause.
+ */
+ if (neworlist != NIL)
+ {
+ if (list_length(neworlist) == 1)
+ winners = lappend(winners, linitial(neworlist));
+ else
+ winners = lappend(winners, make_orclause(pull_ors(neworlist)));
+ }
+
+ /*
+ * And return the constructed AND clause, again being wary of a single
+ * element and AND/OR flatness.
+ */
+ if (list_length(winners) == 1)
+ return (Expr *) linitial(winners);
+ else
+ return make_andclause(pull_ands(winners));
+}
diff --git a/src/backend/optimizer/prep/preptlist.c b/src/backend/optimizer/prep/preptlist.c
new file mode 100644
index 0000000..133966b
--- /dev/null
+++ b/src/backend/optimizer/prep/preptlist.c
@@ -0,0 +1,497 @@
+/*-------------------------------------------------------------------------
+ *
+ * preptlist.c
+ * Routines to preprocess the parse tree target list
+ *
+ * For an INSERT, the targetlist must contain an entry for each attribute of
+ * the target relation in the correct order.
+ *
+ * For an UPDATE, the targetlist just contains the expressions for the new
+ * column values.
+ *
+ * For UPDATE and DELETE queries, the targetlist must also contain "junk"
+ * tlist entries needed to allow the executor to identify the rows to be
+ * updated or deleted; for example, the ctid of a heap row. (The planner
+ * adds these; they're not in what we receive from the parser/rewriter.)
+ *
+ * For all query types, there can be additional junk tlist entries, such as
+ * sort keys, Vars needed for a RETURNING list, and row ID information needed
+ * for SELECT FOR UPDATE locking and/or EvalPlanQual checking.
+ *
+ * The query rewrite phase also does preprocessing of the targetlist (see
+ * rewriteTargetListIU). The division of labor between here and there is
+ * partially historical, but it's not entirely arbitrary. The stuff done
+ * here is closely connected to physical access to tables, whereas the
+ * rewriter's work is more concerned with SQL semantics.
+ *
+ *
+ * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * IDENTIFICATION
+ * src/backend/optimizer/prep/preptlist.c
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include "access/table.h"
+#include "nodes/makefuncs.h"
+#include "optimizer/appendinfo.h"
+#include "optimizer/optimizer.h"
+#include "optimizer/prep.h"
+#include "optimizer/tlist.h"
+#include "parser/parse_coerce.h"
+#include "parser/parsetree.h"
+#include "utils/rel.h"
+
+static List *expand_insert_targetlist(List *tlist, Relation rel);
+
+
+/*
+ * preprocess_targetlist
+ * Driver for preprocessing the parse tree targetlist.
+ *
+ * The preprocessed targetlist is returned in root->processed_tlist.
+ * Also, if this is an UPDATE, we return a list of target column numbers
+ * in root->update_colnos. (Resnos in processed_tlist will be consecutive,
+ * so do not look at that to find out which columns are targets!)
+ */
+void
+preprocess_targetlist(PlannerInfo *root)
+{
+ Query *parse = root->parse;
+ int result_relation = parse->resultRelation;
+ List *range_table = parse->rtable;
+ CmdType command_type = parse->commandType;
+ RangeTblEntry *target_rte = NULL;
+ Relation target_relation = NULL;
+ List *tlist;
+ ListCell *lc;
+
+ /*
+ * If there is a result relation, open it so we can look for missing
+ * columns and so on. We assume that previous code already acquired at
+ * least AccessShareLock on the relation, so we need no lock here.
+ */
+ if (result_relation)
+ {
+ target_rte = rt_fetch(result_relation, range_table);
+
+ /*
+ * Sanity check: it'd better be a real relation not, say, a subquery.
+ * Else parser or rewriter messed up.
+ */
+ if (target_rte->rtekind != RTE_RELATION)
+ elog(ERROR, "result relation must be a regular relation");
+
+ target_relation = table_open(target_rte->relid, NoLock);
+ }
+ else
+ Assert(command_type == CMD_SELECT);
+
+ /*
+ * In an INSERT, the executor expects the targetlist to match the exact
+ * order of the target table's attributes, including entries for
+ * attributes not mentioned in the source query.
+ *
+ * In an UPDATE, we don't rearrange the tlist order, but we need to make a
+ * separate list of the target attribute numbers, in tlist order, and then
+ * renumber the processed_tlist entries to be consecutive.
+ */
+ tlist = parse->targetList;
+ if (command_type == CMD_INSERT)
+ tlist = expand_insert_targetlist(tlist, target_relation);
+ else if (command_type == CMD_UPDATE)
+ root->update_colnos = extract_update_targetlist_colnos(tlist);
+
+ /*
+ * For non-inherited UPDATE/DELETE/MERGE, register any junk column(s)
+ * needed to allow the executor to identify the rows to be updated or
+ * deleted. In the inheritance case, we do nothing now, leaving this to
+ * be dealt with when expand_inherited_rtentry() makes the leaf target
+ * relations. (But there might not be any leaf target relations, in which
+ * case we must do this in distribute_row_identity_vars().)
+ */
+ if ((command_type == CMD_UPDATE || command_type == CMD_DELETE ||
+ command_type == CMD_MERGE) &&
+ !target_rte->inh)
+ {
+ /* row-identity logic expects to add stuff to processed_tlist */
+ root->processed_tlist = tlist;
+ add_row_identity_columns(root, result_relation,
+ target_rte, target_relation);
+ tlist = root->processed_tlist;
+ }
+
+ /*
+ * For MERGE we also need to handle the target list for each INSERT and
+ * UPDATE action separately. In addition, we examine the qual of each
+ * action and add any Vars there (other than those of the target rel) to
+ * the subplan targetlist.
+ */
+ if (command_type == CMD_MERGE)
+ {
+ ListCell *l;
+
+ /*
+ * For MERGE, handle targetlist of each MergeAction separately. Give
+ * the same treatment to MergeAction->targetList as we would have
+ * given to a regular INSERT. For UPDATE, collect the column numbers
+ * being modified.
+ */
+ foreach(l, parse->mergeActionList)
+ {
+ MergeAction *action = (MergeAction *) lfirst(l);
+ List *vars;
+ ListCell *l2;
+
+ if (action->commandType == CMD_INSERT)
+ action->targetList = expand_insert_targetlist(action->targetList,
+ target_relation);
+ else if (action->commandType == CMD_UPDATE)
+ action->updateColnos =
+ extract_update_targetlist_colnos(action->targetList);
+
+ /*
+ * Add resjunk entries for any Vars and PlaceHolderVars used in
+ * each action's targetlist and WHEN condition that belong to
+ * relations other than the target. We don't expect to see any
+ * aggregates or window functions here.
+ */
+ vars = pull_var_clause((Node *)
+ list_concat_copy((List *) action->qual,
+ action->targetList),
+ PVC_INCLUDE_PLACEHOLDERS);
+ foreach(l2, vars)
+ {
+ Var *var = (Var *) lfirst(l2);
+ TargetEntry *tle;
+
+ if (IsA(var, Var) && var->varno == result_relation)
+ continue; /* don't need it */
+
+ if (tlist_member((Expr *) var, tlist))
+ continue; /* already got it */
+
+ tle = makeTargetEntry((Expr *) var,
+ list_length(tlist) + 1,
+ NULL, true);
+ tlist = lappend(tlist, tle);
+ }
+ list_free(vars);
+ }
+ }
+
+ /*
+ * Add necessary junk columns for rowmarked rels. These values are needed
+ * for locking of rels selected FOR UPDATE/SHARE, and to do EvalPlanQual
+ * rechecking. See comments for PlanRowMark in plannodes.h. If you
+ * change this stanza, see also expand_inherited_rtentry(), which has to
+ * be able to add on junk columns equivalent to these.
+ *
+ * (Someday it might be useful to fold these resjunk columns into the
+ * row-identity-column management used for UPDATE/DELETE. Today is not
+ * that day, however. One notable issue is that it seems important that
+ * the whole-row Vars made here use the real table rowtype, not RECORD, so
+ * that conversion to/from child relations' rowtypes will happen. Also,
+ * since these entries don't potentially bloat with more and more child
+ * relations, there's not really much need for column sharing.)
+ */
+ foreach(lc, root->rowMarks)
+ {
+ PlanRowMark *rc = (PlanRowMark *) lfirst(lc);
+ Var *var;
+ char resname[32];
+ TargetEntry *tle;
+
+ /* child rels use the same junk attrs as their parents */
+ if (rc->rti != rc->prti)
+ continue;
+
+ if (rc->allMarkTypes & ~(1 << ROW_MARK_COPY))
+ {
+ /* Need to fetch TID */
+ var = makeVar(rc->rti,
+ SelfItemPointerAttributeNumber,
+ TIDOID,
+ -1,
+ InvalidOid,
+ 0);
+ snprintf(resname, sizeof(resname), "ctid%u", rc->rowmarkId);
+ tle = makeTargetEntry((Expr *) var,
+ list_length(tlist) + 1,
+ pstrdup(resname),
+ true);
+ tlist = lappend(tlist, tle);
+ }
+ if (rc->allMarkTypes & (1 << ROW_MARK_COPY))
+ {
+ /* Need the whole row as a junk var */
+ var = makeWholeRowVar(rt_fetch(rc->rti, range_table),
+ rc->rti,
+ 0,
+ false);
+ snprintf(resname, sizeof(resname), "wholerow%u", rc->rowmarkId);
+ tle = makeTargetEntry((Expr *) var,
+ list_length(tlist) + 1,
+ pstrdup(resname),
+ true);
+ tlist = lappend(tlist, tle);
+ }
+
+ /* If parent of inheritance tree, always fetch the tableoid too. */
+ if (rc->isParent)
+ {
+ var = makeVar(rc->rti,
+ TableOidAttributeNumber,
+ OIDOID,
+ -1,
+ InvalidOid,
+ 0);
+ snprintf(resname, sizeof(resname), "tableoid%u", rc->rowmarkId);
+ tle = makeTargetEntry((Expr *) var,
+ list_length(tlist) + 1,
+ pstrdup(resname),
+ true);
+ tlist = lappend(tlist, tle);
+ }
+ }
+
+ /*
+ * If the query has a RETURNING list, add resjunk entries for any Vars
+ * used in RETURNING that belong to other relations. We need to do this
+ * to make these Vars available for the RETURNING calculation. Vars that
+ * belong to the result rel don't need to be added, because they will be
+ * made to refer to the actual heap tuple.
+ */
+ if (parse->returningList && list_length(parse->rtable) > 1)
+ {
+ List *vars;
+ ListCell *l;
+
+ vars = pull_var_clause((Node *) parse->returningList,
+ PVC_RECURSE_AGGREGATES |
+ PVC_RECURSE_WINDOWFUNCS |
+ PVC_INCLUDE_PLACEHOLDERS);
+ foreach(l, vars)
+ {
+ Var *var = (Var *) lfirst(l);
+ TargetEntry *tle;
+
+ if (IsA(var, Var) &&
+ var->varno == result_relation)
+ continue; /* don't need it */
+
+ if (tlist_member((Expr *) var, tlist))
+ continue; /* already got it */
+
+ tle = makeTargetEntry((Expr *) var,
+ list_length(tlist) + 1,
+ NULL,
+ true);
+
+ tlist = lappend(tlist, tle);
+ }
+ list_free(vars);
+ }
+
+ root->processed_tlist = tlist;
+
+ if (target_relation)
+ table_close(target_relation, NoLock);
+}
+
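+/*
+ * To illustrate (hypothetical query): for
+ *     SELECT * FROM pk, fk WHERE pk.id = fk.pkid FOR UPDATE OF pk;
+ * the rowMarks loop above adds a resjunk "ctid<N>" column for pk so the
+ * executor can lock that row by TID, whereas a ROW_MARK_COPY rel (such as a
+ * subquery) would instead get a "wholerow<N>" junk column.
+ */
+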
+/*
+ * extract_update_targetlist_colnos
+ * Extract a list of the target-table column numbers that
+ * an UPDATE's targetlist wants to assign to, then renumber.
+ *
+ * The convention in the parser and rewriter is that the resnos in an
+ * UPDATE's non-resjunk TLE entries are the target column numbers
+ * to assign to. Here, we extract that info into a separate list, and
+ * then convert the tlist to the sequential-numbering convention that's
+ * used by all other query types.
+ *
+ * This is also applied to the tlist associated with INSERT ... ON CONFLICT
+ * ... UPDATE, although not till much later in planning.
+ */
+List *
+extract_update_targetlist_colnos(List *tlist)
+{
+ List *update_colnos = NIL;
+ AttrNumber nextresno = 1;
+ ListCell *lc;
+
+ foreach(lc, tlist)
+ {
+ TargetEntry *tle = (TargetEntry *) lfirst(lc);
+
+ if (!tle->resjunk)
+ update_colnos = lappend_int(update_colnos, tle->resno);
+ tle->resno = nextresno++;
+ }
+ return update_colnos;
+}
+
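+/*
+ * For example (illustrative): given "UPDATE t SET c5 = ..., c2 = ...", the
+ * incoming non-junk TLEs carry resnos 5 and 2, so update_colnos becomes
+ * (5, 2) while the TLEs themselves are renumbered 1, 2 (any junk entries
+ * continue the sequence).
+ */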
+
+/*****************************************************************************
+ *
+ * TARGETLIST EXPANSION
+ *
+ *****************************************************************************/
+
+/*
+ * expand_insert_targetlist
+ * Given a target list as generated by the parser and a result relation,
+ * add targetlist entries for any missing attributes, and ensure the
+ * non-junk attributes appear in proper field order.
+ *
+ * Once upon a time we also did more or less this with UPDATE targetlists,
+ * but now this code is only applied to INSERT targetlists.
+ */
+static List *
+expand_insert_targetlist(List *tlist, Relation rel)
+{
+ List *new_tlist = NIL;
+ ListCell *tlist_item;
+ int attrno,
+ numattrs;
+
+ tlist_item = list_head(tlist);
+
+ /*
+ * The rewriter should have already ensured that the TLEs are in correct
+ * order; but we have to insert TLEs for any missing attributes.
+ *
+ * Scan the tuple description in the relation's relcache entry to make
+ * sure we have all the user attributes in the right order.
+ */
+ numattrs = RelationGetNumberOfAttributes(rel);
+
+ for (attrno = 1; attrno <= numattrs; attrno++)
+ {
+ Form_pg_attribute att_tup = TupleDescAttr(rel->rd_att, attrno - 1);
+ TargetEntry *new_tle = NULL;
+
+ if (tlist_item != NULL)
+ {
+ TargetEntry *old_tle = (TargetEntry *) lfirst(tlist_item);
+
+ if (!old_tle->resjunk && old_tle->resno == attrno)
+ {
+ new_tle = old_tle;
+ tlist_item = lnext(tlist, tlist_item);
+ }
+ }
+
+ if (new_tle == NULL)
+ {
+ /*
+ * Didn't find a matching tlist entry, so make one.
+ *
+ * INSERTs should insert NULL in this case. (We assume the
+ * rewriter would have inserted any available non-NULL default
+ * value.) Also, if the column isn't dropped, apply any domain
+ * constraints that might exist --- this is to catch domain NOT
+ * NULL.
+ *
+ * When generating a NULL constant for a dropped column, we label
+ * it INT4 (any other guaranteed-to-exist datatype would do as
+ * well). We can't label it with the dropped column's datatype
+ * since that might not exist anymore. It does not really matter
+ * what we claim the type is, since NULL is NULL --- its
+ * representation is datatype-independent. This could perhaps
+ * confuse code comparing the finished plan to the target
+ * relation, however.
+ */
+ Oid atttype = att_tup->atttypid;
+ Oid attcollation = att_tup->attcollation;
+ Node *new_expr;
+
+ if (!att_tup->attisdropped)
+ {
+ new_expr = (Node *) makeConst(atttype,
+ -1,
+ attcollation,
+ att_tup->attlen,
+ (Datum) 0,
+ true, /* isnull */
+ att_tup->attbyval);
+ new_expr = coerce_to_domain(new_expr,
+ InvalidOid, -1,
+ atttype,
+ COERCION_IMPLICIT,
+ COERCE_IMPLICIT_CAST,
+ -1,
+ false);
+ }
+ else
+ {
+ /* Insert NULL for dropped column */
+ new_expr = (Node *) makeConst(INT4OID,
+ -1,
+ InvalidOid,
+ sizeof(int32),
+ (Datum) 0,
+ true, /* isnull */
+ true /* byval */ );
+ }
+
+ new_tle = makeTargetEntry((Expr *) new_expr,
+ attrno,
+ pstrdup(NameStr(att_tup->attname)),
+ false);
+ }
+
+ new_tlist = lappend(new_tlist, new_tle);
+ }
+
+ /*
+ * The remaining tlist entries should be resjunk; append them all to the
+ * end of the new tlist, making sure they have resnos higher than the last
+ * real attribute. (Note: although the rewriter already did such
+ * renumbering, we have to do it again here in case we added NULL entries
+ * above.)
+ */
+ while (tlist_item)
+ {
+ TargetEntry *old_tle = (TargetEntry *) lfirst(tlist_item);
+
+ if (!old_tle->resjunk)
+ elog(ERROR, "targetlist is not sorted correctly");
+ /* Get the resno right, but don't copy unnecessarily */
+ if (old_tle->resno != attrno)
+ {
+ old_tle = flatCopyTargetEntry(old_tle);
+ old_tle->resno = attrno;
+ }
+ new_tlist = lappend(new_tlist, old_tle);
+ attrno++;
+ tlist_item = lnext(tlist, tlist_item);
+ }
+
+ return new_tlist;
+}
+
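+/*
+ * For example (illustrative): if rel has columns (a int, b int, c int) with
+ * no defaults, then for "INSERT INTO rel (b) VALUES (1)" the rewriter hands
+ * us a one-entry tlist for b, and the loop above expands it to three entries
+ * in attribute order, supplying NULL constants (domain-coerced if the column
+ * type is a domain) for a and c.
+ */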
+
+/*
+ * Locate PlanRowMark for given RT index, or return NULL if none
+ *
+ * This probably ought to be elsewhere, but there's no very good place
+ */
+PlanRowMark *
+get_plan_rowmark(List *rowmarks, Index rtindex)
+{
+ ListCell *l;
+
+ foreach(l, rowmarks)
+ {
+ PlanRowMark *rc = (PlanRowMark *) lfirst(l);
+
+ if (rc->rti == rtindex)
+ return rc;
+ }
+ return NULL;
+}
diff --git a/src/backend/optimizer/prep/prepunion.c b/src/backend/optimizer/prep/prepunion.c
new file mode 100644
index 0000000..f004fad
--- /dev/null
+++ b/src/backend/optimizer/prep/prepunion.c
@@ -0,0 +1,1420 @@
+/*-------------------------------------------------------------------------
+ *
+ * prepunion.c
+ * Routines to plan set-operation queries. The filename is a leftover
+ * from a time when only UNIONs were implemented.
+ *
+ * There are two code paths in the planner for set-operation queries.
+ * If a subquery consists entirely of simple UNION ALL operations, it
+ * is converted into an "append relation". Otherwise, it is handled
+ * by the general code in this module (plan_set_operations and its
+ * subroutines). There is some support code here for the append-relation
+ * case, but most of the heavy lifting for that is done elsewhere,
+ * notably in prepjointree.c and allpaths.c.
+ *
+ * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ * src/backend/optimizer/prep/prepunion.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "access/htup_details.h"
+#include "access/sysattr.h"
+#include "catalog/partition.h"
+#include "catalog/pg_inherits.h"
+#include "catalog/pg_type.h"
+#include "miscadmin.h"
+#include "nodes/makefuncs.h"
+#include "nodes/nodeFuncs.h"
+#include "optimizer/cost.h"
+#include "optimizer/pathnode.h"
+#include "optimizer/paths.h"
+#include "optimizer/planmain.h"
+#include "optimizer/planner.h"
+#include "optimizer/prep.h"
+#include "optimizer/tlist.h"
+#include "parser/parse_coerce.h"
+#include "parser/parsetree.h"
+#include "utils/lsyscache.h"
+#include "utils/rel.h"
+#include "utils/selfuncs.h"
+#include "utils/syscache.h"
+
+
+static RelOptInfo *recurse_set_operations(Node *setOp, PlannerInfo *root,
+ List *colTypes, List *colCollations,
+ bool junkOK,
+ int flag, List *refnames_tlist,
+ List **pTargetList,
+ double *pNumGroups);
+static RelOptInfo *generate_recursion_path(SetOperationStmt *setOp,
+ PlannerInfo *root,
+ List *refnames_tlist,
+ List **pTargetList);
+static RelOptInfo *generate_union_paths(SetOperationStmt *op, PlannerInfo *root,
+ List *refnames_tlist,
+ List **pTargetList);
+static RelOptInfo *generate_nonunion_paths(SetOperationStmt *op, PlannerInfo *root,
+ List *refnames_tlist,
+ List **pTargetList);
+static List *plan_union_children(PlannerInfo *root,
+ SetOperationStmt *top_union,
+ List *refnames_tlist,
+ List **tlist_list);
+static Path *make_union_unique(SetOperationStmt *op, Path *path, List *tlist,
+ PlannerInfo *root);
+static void postprocess_setop_rel(PlannerInfo *root, RelOptInfo *rel);
+static bool choose_hashed_setop(PlannerInfo *root, List *groupClauses,
+ Path *input_path,
+ double dNumGroups, double dNumOutputRows,
+ const char *construct);
+static List *generate_setop_tlist(List *colTypes, List *colCollations,
+ int flag,
+ Index varno,
+ bool hack_constants,
+ List *input_tlist,
+ List *refnames_tlist);
+static List *generate_append_tlist(List *colTypes, List *colCollations,
+ bool flag,
+ List *input_tlists,
+ List *refnames_tlist);
+static List *generate_setop_grouplist(SetOperationStmt *op, List *targetlist);
+
+
+/*
+ * plan_set_operations
+ *
+ * Plans the queries for a tree of set operations (UNION/INTERSECT/EXCEPT)
+ *
+ * This routine only deals with the setOperations tree of the given query.
+ * Any top-level ORDER BY requested in root->parse->sortClause will be handled
+ * when we return to grouping_planner; likewise for LIMIT.
+ *
+ * What we return is an "upperrel" RelOptInfo containing at least one Path
+ * that implements the set-operation tree. In addition, root->processed_tlist
+ * receives a targetlist representing the output of the topmost setop node.
+ */
+RelOptInfo *
+plan_set_operations(PlannerInfo *root)
+{
+ Query *parse = root->parse;
+ SetOperationStmt *topop = castNode(SetOperationStmt, parse->setOperations);
+ Node *node;
+ RangeTblEntry *leftmostRTE;
+ Query *leftmostQuery;
+ RelOptInfo *setop_rel;
+ List *top_tlist;
+
+ Assert(topop);
+
+ /* check for unsupported stuff */
+ Assert(parse->jointree->fromlist == NIL);
+ Assert(parse->jointree->quals == NULL);
+ Assert(parse->groupClause == NIL);
+ Assert(parse->havingQual == NULL);
+ Assert(parse->windowClause == NIL);
+ Assert(parse->distinctClause == NIL);
+
+ /*
+ * In the outer query level, we won't have any true equivalences to deal
+ * with; but we do want to be able to make pathkeys, which will require
+ * single-member EquivalenceClasses. Indicate that EC merging is complete
+ * so that pathkeys.c won't complain.
+ */
+ Assert(root->eq_classes == NIL);
+ root->ec_merging_done = true;
+
+ /*
+ * We'll need to build RelOptInfos for each of the leaf subqueries, which
+ * are RTE_SUBQUERY rangetable entries in this Query. Prepare the index
+ * arrays for those, and for AppendRelInfos in case they're needed.
+ */
+ setup_simple_rel_arrays(root);
+
+ /*
+ * Find the leftmost component Query. We need to use its column names for
+ * all generated tlists (else SELECT INTO won't work right).
+ */
+ node = topop->larg;
+ while (node && IsA(node, SetOperationStmt))
+ node = ((SetOperationStmt *) node)->larg;
+ Assert(node && IsA(node, RangeTblRef));
+ leftmostRTE = root->simple_rte_array[((RangeTblRef *) node)->rtindex];
+ leftmostQuery = leftmostRTE->subquery;
+ Assert(leftmostQuery != NULL);
+
+ /*
+ * If the topmost node is a recursive union, it needs special processing.
+ */
+ if (root->hasRecursion)
+ {
+ setop_rel = generate_recursion_path(topop, root,
+ leftmostQuery->targetList,
+ &top_tlist);
+ }
+ else
+ {
+ /*
+ * Recurse on setOperations tree to generate paths for set ops. The
+ * final output paths should have just the column types shown as the
+ * output from the top-level node, plus possibly resjunk working
+ * columns (we can rely on upper-level nodes to deal with that).
+ */
+ setop_rel = recurse_set_operations((Node *) topop, root,
+ topop->colTypes, topop->colCollations,
+ true, -1,
+ leftmostQuery->targetList,
+ &top_tlist,
+ NULL);
+ }
+
+ /* Must return the built tlist into root->processed_tlist. */
+ root->processed_tlist = top_tlist;
+
+ return setop_rel;
+}
+
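+/*
+ * For orientation (illustrative): a query such as
+ *     (SELECT a FROM t1 UNION SELECT b FROM t2) EXCEPT SELECT c FROM t3
+ * arrives here as a SetOperationStmt tree with EXCEPT at the top, whose larg
+ * is the UNION SetOperationStmt and whose leaves are RangeTblRefs pointing
+ * at RTE_SUBQUERY entries for the three component SELECTs.  The routines
+ * below recurse over that tree, building paths bottom-up.
+ */
+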
+/*
+ * recurse_set_operations
+ * Recursively handle one step in a tree of set operations
+ *
+ * colTypes: OID list of set-op's result column datatypes
+ * colCollations: OID list of set-op's result column collations
+ * junkOK: if true, child resjunk columns may be left in the result
+ * flag: if >= 0, add a resjunk output column indicating value of flag
+ * refnames_tlist: targetlist to take column names from
+ *
+ * Returns a RelOptInfo for the subtree, as well as these output parameters:
+ * *pTargetList: receives the fully-fledged tlist for the subtree's top plan
+ * *pNumGroups: if not NULL, we estimate the number of distinct groups
+ * in the result, and store it there
+ *
+ * The pTargetList output parameter is mostly redundant with the pathtarget
+ * of the returned RelOptInfo, but for the moment we need it because much of
+ * the logic in this file depends on flag columns being marked resjunk.
+ * Pending a redesign of how that works, this is the easy way out.
+ *
+ * We don't have to care about typmods here: the only allowed difference
+ * between set-op input and output typmods is input is a specific typmod
+ * and output is -1, and that does not require a coercion.
+ */
+static RelOptInfo *
+recurse_set_operations(Node *setOp, PlannerInfo *root,
+ List *colTypes, List *colCollations,
+ bool junkOK,
+ int flag, List *refnames_tlist,
+ List **pTargetList,
+ double *pNumGroups)
+{
+ RelOptInfo *rel = NULL; /* keep compiler quiet */
+
+ /* Guard against stack overflow due to overly complex setop nests */
+ check_stack_depth();
+
+ if (IsA(setOp, RangeTblRef))
+ {
+ RangeTblRef *rtr = (RangeTblRef *) setOp;
+ RangeTblEntry *rte = root->simple_rte_array[rtr->rtindex];
+ Query *subquery = rte->subquery;
+ PlannerInfo *subroot;
+ RelOptInfo *final_rel;
+ Path *subpath;
+ Path *path;
+ List *tlist;
+
+ Assert(subquery != NULL);
+
+ /* Build a RelOptInfo for this leaf subquery. */
+ rel = build_simple_rel(root, rtr->rtindex, NULL);
+
+ /* plan_params should not be in use in current query level */
+ Assert(root->plan_params == NIL);
+
+ /* Generate a subroot and Paths for the subquery */
+ subroot = rel->subroot = subquery_planner(root->glob, subquery,
+ root,
+ false,
+ root->tuple_fraction);
+
+ /*
+ * It should not be possible for the primitive query to contain any
+ * cross-references to other primitive queries in the setop tree.
+ */
+ if (root->plan_params)
+ elog(ERROR, "unexpected outer reference in set operation subquery");
+
+ /* Figure out the appropriate target list for this subquery. */
+ tlist = generate_setop_tlist(colTypes, colCollations,
+ flag,
+ rtr->rtindex,
+ true,
+ subroot->processed_tlist,
+ refnames_tlist);
+ rel->reltarget = create_pathtarget(root, tlist);
+
+ /* Return the fully-fledged tlist to caller, too */
+ *pTargetList = tlist;
+
+ /*
+ * Mark rel with estimated output rows, width, etc. Note that we have
+ * to do this before generating outer-query paths, else
+ * cost_subqueryscan is not happy.
+ */
+ set_subquery_size_estimates(root, rel);
+
+ /*
+ * Since we may want to add a partial path to this relation, we must
+ * set its consider_parallel flag correctly.
+ */
+ final_rel = fetch_upper_rel(subroot, UPPERREL_FINAL, NULL);
+ rel->consider_parallel = final_rel->consider_parallel;
+
+ /*
+ * For the moment, we consider only a single Path for the subquery.
+ * This should change soon (make it look more like
+ * set_subquery_pathlist).
+ */
+ subpath = get_cheapest_fractional_path(final_rel,
+ root->tuple_fraction);
+
+ /*
+ * Stick a SubqueryScanPath atop that.
+ *
+ * We don't bother to determine the subquery's output ordering since
+ * it won't be reflected in the set-op result anyhow; so just label
+ * the SubqueryScanPath with nil pathkeys. (XXX that should change
+ * soon too, likely.)
+ */
+ path = (Path *) create_subqueryscan_path(root, rel, subpath,
+ NIL, NULL);
+
+ add_path(rel, path);
+
+ /*
+ * If we have a partial path for the child relation, we can use that
+ * to build a partial path for this relation. But there's no point in
+ * considering any path but the cheapest.
+ */
+ if (rel->consider_parallel && bms_is_empty(rel->lateral_relids) &&
+ final_rel->partial_pathlist != NIL)
+ {
+ Path *partial_subpath;
+ Path *partial_path;
+
+ partial_subpath = linitial(final_rel->partial_pathlist);
+ partial_path = (Path *)
+ create_subqueryscan_path(root, rel, partial_subpath,
+ NIL, NULL);
+ add_partial_path(rel, partial_path);
+ }
+
+ /*
+ * Estimate number of groups if caller wants it. If the subquery used
+ * grouping or aggregation, its output is probably mostly unique
+ * anyway; otherwise do statistical estimation.
+ *
+ * XXX you don't really want to know about this: we do the estimation
+ * using the subquery's original targetlist expressions, not the
+ * subroot->processed_tlist which might seem more appropriate. The
+ * reason is that if the subquery is itself a setop, it may return a
+ * processed_tlist containing "varno 0" Vars generated by
+ * generate_append_tlist, and those would confuse estimate_num_groups
+ * mightily. We ought to get rid of the "varno 0" hack, but that
+ * requires a redesign of the parsetree representation of setops, so
+ * that there can be an RTE corresponding to each setop's output.
+ */
+ if (pNumGroups)
+ {
+ if (subquery->groupClause || subquery->groupingSets ||
+ subquery->distinctClause ||
+ subroot->hasHavingQual || subquery->hasAggs)
+ *pNumGroups = subpath->rows;
+ else
+ *pNumGroups = estimate_num_groups(subroot,
+ get_tlist_exprs(subquery->targetList, false),
+ subpath->rows,
+ NULL,
+ NULL);
+ }
+ }
+ else if (IsA(setOp, SetOperationStmt))
+ {
+ SetOperationStmt *op = (SetOperationStmt *) setOp;
+
+ /* UNIONs are much different from INTERSECT/EXCEPT */
+ if (op->op == SETOP_UNION)
+ rel = generate_union_paths(op, root,
+ refnames_tlist,
+ pTargetList);
+ else
+ rel = generate_nonunion_paths(op, root,
+ refnames_tlist,
+ pTargetList);
+ if (pNumGroups)
+ *pNumGroups = rel->rows;
+
+ /*
+ * If necessary, add a Result node to project the caller-requested
+ * output columns.
+ *
+ * XXX you don't really want to know about this: setrefs.c will apply
+ * fix_upper_expr() to the Result node's tlist. This would fail if the
+ * Vars generated by generate_setop_tlist() were not exactly equal()
+ * to the corresponding tlist entries of the subplan. However, since
+ * the subplan was generated by generate_union_paths() or
+ * generate_nonunion_paths(), and hence its tlist was generated by
+ * generate_append_tlist(), this will work. We just tell
+ * generate_setop_tlist() to use varno 0.
+ */
+ if (flag >= 0 ||
+ !tlist_same_datatypes(*pTargetList, colTypes, junkOK) ||
+ !tlist_same_collations(*pTargetList, colCollations, junkOK))
+ {
+ PathTarget *target;
+ ListCell *lc;
+
+ *pTargetList = generate_setop_tlist(colTypes, colCollations,
+ flag,
+ 0,
+ false,
+ *pTargetList,
+ refnames_tlist);
+ target = create_pathtarget(root, *pTargetList);
+
+ /* Apply projection to each path */
+ foreach(lc, rel->pathlist)
+ {
+ Path *subpath = (Path *) lfirst(lc);
+ Path *path;
+
+ Assert(subpath->param_info == NULL);
+ path = apply_projection_to_path(root, subpath->parent,
+ subpath, target);
+ /* If we had to add a Result, path is different from subpath */
+ if (path != subpath)
+ lfirst(lc) = path;
+ }
+
+ /* Apply projection to each partial path */
+ foreach(lc, rel->partial_pathlist)
+ {
+ Path *subpath = (Path *) lfirst(lc);
+ Path *path;
+
+ Assert(subpath->param_info == NULL);
+
+ /* avoid apply_projection_to_path, in case of multiple refs */
+ path = (Path *) create_projection_path(root, subpath->parent,
+ subpath, target);
+ lfirst(lc) = path;
+ }
+ }
+ }
+ else
+ {
+ elog(ERROR, "unrecognized node type: %d",
+ (int) nodeTag(setOp));
+ *pTargetList = NIL;
+ }
+
+ postprocess_setop_rel(root, rel);
+
+ return rel;
+}
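+
+/*
+ * For example, given a query such as
+ *		(SELECT x FROM a UNION ALL SELECT x FROM b) INTERSECT SELECT x FROM c
+ * (table names here are only illustrative), recursion dispatches the
+ * INTERSECT node to generate_nonunion_paths(), reaches the inner UNION ALL
+ * via generate_union_paths(), and treats "SELECT x FROM c" as a leaf: its
+ * RangeTblRef is planned with subquery_planner() and a SubqueryScan path is
+ * stacked atop the cheapest resulting path.
+ */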
+
+/*
+ * Generate paths for a recursive UNION node
+ */
+static RelOptInfo *
+generate_recursion_path(SetOperationStmt *setOp, PlannerInfo *root,
+ List *refnames_tlist,
+ List **pTargetList)
+{
+ RelOptInfo *result_rel;
+ Path *path;
+ RelOptInfo *lrel,
+ *rrel;
+ Path *lpath;
+ Path *rpath;
+ List *lpath_tlist;
+ List *rpath_tlist;
+ List *tlist;
+ List *groupList;
+ double dNumGroups;
+
+ /* Parser should have rejected other cases */
+ if (setOp->op != SETOP_UNION)
+ elog(ERROR, "only UNION queries can be recursive");
+ /* Worktable ID should be assigned */
+ Assert(root->wt_param_id >= 0);
+
+ /*
+ * Unlike a regular UNION node, process the left and right inputs
+ * separately without any intention of combining them into one Append.
+ */
+ lrel = recurse_set_operations(setOp->larg, root,
+ setOp->colTypes, setOp->colCollations,
+ false, -1,
+ refnames_tlist,
+ &lpath_tlist,
+ NULL);
+ lpath = lrel->cheapest_total_path;
+ /* The right path will want to look at the left one ... */
+ root->non_recursive_path = lpath;
+ rrel = recurse_set_operations(setOp->rarg, root,
+ setOp->colTypes, setOp->colCollations,
+ false, -1,
+ refnames_tlist,
+ &rpath_tlist,
+ NULL);
+ rpath = rrel->cheapest_total_path;
+ root->non_recursive_path = NULL;
+
+ /*
+ * Generate tlist for RecursiveUnion path node --- same as in Append cases
+ */
+ tlist = generate_append_tlist(setOp->colTypes, setOp->colCollations, false,
+ list_make2(lpath_tlist, rpath_tlist),
+ refnames_tlist);
+
+ *pTargetList = tlist;
+
+ /* Build result relation. */
+ result_rel = fetch_upper_rel(root, UPPERREL_SETOP,
+ bms_union(lrel->relids, rrel->relids));
+ result_rel->reltarget = create_pathtarget(root, tlist);
+
+ /*
+ * If UNION, identify the grouping operators
+ */
+ if (setOp->all)
+ {
+ groupList = NIL;
+ dNumGroups = 0;
+ }
+ else
+ {
+ /* Identify the grouping semantics */
+ groupList = generate_setop_grouplist(setOp, tlist);
+
+ /* We only support hashing here */
+ if (!grouping_is_hashable(groupList))
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("could not implement recursive UNION"),
+ errdetail("All column datatypes must be hashable.")));
+
+		/*
+		 * For the moment, take the number of distinct groups as equal to the
+		 * total input size, ie, the worst case.  The recursive term's row
+		 * estimate is per-iteration, so it is scaled by the same arbitrary
+		 * ten-iteration assumption that cost_recursive_union() makes.
+		 */
+ dNumGroups = lpath->rows + rpath->rows * 10;
+ }
+
+ /*
+ * And make the path node.
+ */
+ path = (Path *) create_recursiveunion_path(root,
+ result_rel,
+ lpath,
+ rpath,
+ result_rel->reltarget,
+ groupList,
+ root->wt_param_id,
+ dNumGroups);
+
+ add_path(result_rel, path);
+ postprocess_setop_rel(root, result_rel);
+ return result_rel;
+}
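+
+/*
+ * Illustrative example (names are hypothetical):
+ *		WITH RECURSIVE t(n) AS (
+ *			VALUES (1)
+ *			UNION ALL
+ *			SELECT n + 1 FROM t WHERE n < 100
+ *		)
+ *		SELECT sum(n) FROM t;
+ * The non-recursive term (the VALUES list) is planned as the left input and
+ * the recursive term as the right one, with root->non_recursive_path set
+ * while the latter is planned; a RecursiveUnion path then combines them.
+ * Had the query said UNION instead of UNION ALL, groupList would be
+ * non-empty and duplicates would be removed by hashing, which is why all
+ * output column datatypes must be hashable.
+ */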
+
+/*
+ * Generate paths for a UNION or UNION ALL node
+ */
+static RelOptInfo *
+generate_union_paths(SetOperationStmt *op, PlannerInfo *root,
+ List *refnames_tlist,
+ List **pTargetList)
+{
+ Relids relids = NULL;
+ RelOptInfo *result_rel;
+ double save_fraction = root->tuple_fraction;
+ ListCell *lc;
+ List *pathlist = NIL;
+ List *partial_pathlist = NIL;
+ bool partial_paths_valid = true;
+ bool consider_parallel = true;
+ List *rellist;
+ List *tlist_list;
+ List *tlist;
+ Path *path;
+
+ /*
+ * If plain UNION, tell children to fetch all tuples.
+ *
+ * Note: in UNION ALL, we pass the top-level tuple_fraction unmodified to
+ * each arm of the UNION ALL. One could make a case for reducing the
+ * tuple fraction for later arms (discounting by the expected size of the
+ * earlier arms' results) but it seems not worth the trouble. The normal
+ * case where tuple_fraction isn't already zero is a LIMIT at top level,
+ * and passing it down as-is is usually enough to get the desired result
+ * of preferring fast-start plans.
+ */
+ if (!op->all)
+ root->tuple_fraction = 0.0;
+
+ /*
+ * If any of my children are identical UNION nodes (same op, all-flag, and
+ * colTypes) then they can be merged into this node so that we generate
+ * only one Append and unique-ification for the lot. Recurse to find such
+ * nodes and compute their children's paths.
+ */
+ rellist = plan_union_children(root, op, refnames_tlist, &tlist_list);
+
+ /*
+ * Generate tlist for Append plan node.
+ *
+ * The tlist for an Append plan isn't important as far as the Append is
+ * concerned, but we must make it look real anyway for the benefit of the
+ * next plan level up.
+ */
+ tlist = generate_append_tlist(op->colTypes, op->colCollations, false,
+ tlist_list, refnames_tlist);
+
+ *pTargetList = tlist;
+
+ /* Build path lists and relid set. */
+ foreach(lc, rellist)
+ {
+ RelOptInfo *rel = lfirst(lc);
+
+ pathlist = lappend(pathlist, rel->cheapest_total_path);
+
+ if (consider_parallel)
+ {
+ if (!rel->consider_parallel)
+ {
+ consider_parallel = false;
+ partial_paths_valid = false;
+ }
+ else if (rel->partial_pathlist == NIL)
+ partial_paths_valid = false;
+ else
+ partial_pathlist = lappend(partial_pathlist,
+ linitial(rel->partial_pathlist));
+ }
+
+ relids = bms_union(relids, rel->relids);
+ }
+
+ /* Build result relation. */
+ result_rel = fetch_upper_rel(root, UPPERREL_SETOP, relids);
+ result_rel->reltarget = create_pathtarget(root, tlist);
+ result_rel->consider_parallel = consider_parallel;
+
+ /*
+ * Append the child results together.
+ */
+ path = (Path *) create_append_path(root, result_rel, pathlist, NIL,
+ NIL, NULL, 0, false, -1);
+
+ /*
+ * For UNION ALL, we just need the Append path. For UNION, need to add
+ * node(s) to remove duplicates.
+ */
+ if (!op->all)
+ path = make_union_unique(op, path, tlist, root);
+
+ add_path(result_rel, path);
+
+ /*
+ * Estimate number of groups. For now we just assume the output is unique
+ * --- this is certainly true for the UNION case, and we want worst-case
+ * estimates anyway.
+ */
+ result_rel->rows = path->rows;
+
+ /*
+ * Now consider doing the same thing using the partial paths plus Append
+ * plus Gather.
+ */
+ if (partial_paths_valid)
+ {
+ Path *ppath;
+ ListCell *lc;
+ int parallel_workers = 0;
+
+ /* Find the highest number of workers requested for any subpath. */
+ foreach(lc, partial_pathlist)
+ {
+ Path *path = lfirst(lc);
+
+ parallel_workers = Max(parallel_workers, path->parallel_workers);
+ }
+ Assert(parallel_workers > 0);
+
+ /*
+ * If the use of parallel append is permitted, always request at least
+ * log2(# of children) paths. We assume it can be useful to have
+ * extra workers in this case because they will be spread out across
+ * the children. The precise formula is just a guess; see
+ * add_paths_to_append_rel.
+ */
+ if (enable_parallel_append)
+ {
+ parallel_workers = Max(parallel_workers,
+ fls(list_length(partial_pathlist)));
+ parallel_workers = Min(parallel_workers,
+ max_parallel_workers_per_gather);
+ }
+ Assert(parallel_workers > 0);
+
+ ppath = (Path *)
+ create_append_path(root, result_rel, NIL, partial_pathlist,
+ NIL, NULL,
+ parallel_workers, enable_parallel_append,
+ -1);
+ ppath = (Path *)
+ create_gather_path(root, result_rel, ppath,
+ result_rel->reltarget, NULL, NULL);
+ if (!op->all)
+ ppath = make_union_unique(op, ppath, tlist, root);
+ add_path(result_rel, ppath);
+ }
+
+ /* Undo effects of possibly forcing tuple_fraction to 0 */
+ root->tuple_fraction = save_fraction;
+
+ return result_rel;
+}
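+
+/*
+ * For example (illustrative table names), in
+ *		SELECT x FROM a UNION SELECT x FROM b UNION SELECT x FROM c
+ * plan_union_children() flattens the nested UNIONs into three children, so a
+ * single Append combines their cheapest paths and make_union_unique() adds
+ * one duplicate-removal step (hashed Agg, or Sort plus Unique) on top.  When
+ * every child also has a partial path, a parallel Append under a Gather is
+ * considered as well.
+ */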
+
+/*
+ * Generate paths for an INTERSECT, INTERSECT ALL, EXCEPT, or EXCEPT ALL node
+ */
+static RelOptInfo *
+generate_nonunion_paths(SetOperationStmt *op, PlannerInfo *root,
+ List *refnames_tlist,
+ List **pTargetList)
+{
+ RelOptInfo *result_rel;
+ RelOptInfo *lrel,
+ *rrel;
+ double save_fraction = root->tuple_fraction;
+ Path *lpath,
+ *rpath,
+ *path;
+ List *lpath_tlist,
+ *rpath_tlist,
+ *tlist_list,
+ *tlist,
+ *groupList,
+ *pathlist;
+ double dLeftGroups,
+ dRightGroups,
+ dNumGroups,
+ dNumOutputRows;
+ bool use_hash;
+ SetOpCmd cmd;
+ int firstFlag;
+
+ /*
+ * Tell children to fetch all tuples.
+ */
+ root->tuple_fraction = 0.0;
+
+ /* Recurse on children, ensuring their outputs are marked */
+ lrel = recurse_set_operations(op->larg, root,
+ op->colTypes, op->colCollations,
+ false, 0,
+ refnames_tlist,
+ &lpath_tlist,
+ &dLeftGroups);
+ lpath = lrel->cheapest_total_path;
+ rrel = recurse_set_operations(op->rarg, root,
+ op->colTypes, op->colCollations,
+ false, 1,
+ refnames_tlist,
+ &rpath_tlist,
+ &dRightGroups);
+ rpath = rrel->cheapest_total_path;
+
+ /* Undo effects of forcing tuple_fraction to 0 */
+ root->tuple_fraction = save_fraction;
+
+ /*
+ * For EXCEPT, we must put the left input first. For INTERSECT, either
+ * order should give the same results, and we prefer to put the smaller
+ * input first in order to minimize the size of the hash table in the
+ * hashing case. "Smaller" means the one with the fewer groups.
+ */
+ if (op->op == SETOP_EXCEPT || dLeftGroups <= dRightGroups)
+ {
+ pathlist = list_make2(lpath, rpath);
+ tlist_list = list_make2(lpath_tlist, rpath_tlist);
+ firstFlag = 0;
+ }
+ else
+ {
+ pathlist = list_make2(rpath, lpath);
+ tlist_list = list_make2(rpath_tlist, lpath_tlist);
+ firstFlag = 1;
+ }
+
+ /*
+ * Generate tlist for Append plan node.
+ *
+ * The tlist for an Append plan isn't important as far as the Append is
+ * concerned, but we must make it look real anyway for the benefit of the
+ * next plan level up. In fact, it has to be real enough that the flag
+ * column is shown as a variable not a constant, else setrefs.c will get
+ * confused.
+ */
+ tlist = generate_append_tlist(op->colTypes, op->colCollations, true,
+ tlist_list, refnames_tlist);
+
+ *pTargetList = tlist;
+
+ /* Build result relation. */
+ result_rel = fetch_upper_rel(root, UPPERREL_SETOP,
+ bms_union(lrel->relids, rrel->relids));
+ result_rel->reltarget = create_pathtarget(root, tlist);
+
+ /*
+ * Append the child results together.
+ */
+ path = (Path *) create_append_path(root, result_rel, pathlist, NIL,
+ NIL, NULL, 0, false, -1);
+
+ /* Identify the grouping semantics */
+ groupList = generate_setop_grouplist(op, tlist);
+
+ /*
+ * Estimate number of distinct groups that we'll need hashtable entries
+ * for; this is the size of the left-hand input for EXCEPT, or the smaller
+ * input for INTERSECT. Also estimate the number of eventual output rows.
+ * In non-ALL cases, we estimate each group produces one output row; in
+ * ALL cases use the relevant relation size. These are worst-case
+ * estimates, of course, but we need to be conservative.
+ */
+ if (op->op == SETOP_EXCEPT)
+ {
+ dNumGroups = dLeftGroups;
+ dNumOutputRows = op->all ? lpath->rows : dNumGroups;
+ }
+ else
+ {
+ dNumGroups = Min(dLeftGroups, dRightGroups);
+ dNumOutputRows = op->all ? Min(lpath->rows, rpath->rows) : dNumGroups;
+ }
+
+ /*
+ * Decide whether to hash or sort, and add a sort node if needed.
+ */
+ use_hash = choose_hashed_setop(root, groupList, path,
+ dNumGroups, dNumOutputRows,
+ (op->op == SETOP_INTERSECT) ? "INTERSECT" : "EXCEPT");
+
+ if (groupList && !use_hash)
+ path = (Path *) create_sort_path(root,
+ result_rel,
+ path,
+ make_pathkeys_for_sortclauses(root,
+ groupList,
+ tlist),
+ -1.0);
+
+ /*
+ * Finally, add a SetOp path node to generate the correct output.
+ */
+ switch (op->op)
+ {
+ case SETOP_INTERSECT:
+ cmd = op->all ? SETOPCMD_INTERSECT_ALL : SETOPCMD_INTERSECT;
+ break;
+ case SETOP_EXCEPT:
+ cmd = op->all ? SETOPCMD_EXCEPT_ALL : SETOPCMD_EXCEPT;
+ break;
+ default:
+ elog(ERROR, "unrecognized set op: %d", (int) op->op);
+ cmd = SETOPCMD_INTERSECT; /* keep compiler quiet */
+ break;
+ }
+ path = (Path *) create_setop_path(root,
+ result_rel,
+ path,
+ cmd,
+ use_hash ? SETOP_HASHED : SETOP_SORTED,
+ groupList,
+ list_length(op->colTypes) + 1,
+ use_hash ? firstFlag : -1,
+ dNumGroups,
+ dNumOutputRows);
+
+ result_rel->rows = path->rows;
+ add_path(result_rel, path);
+ return result_rel;
+}
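+
+/*
+ * For example (illustrative table names), in
+ *		SELECT x FROM a EXCEPT SELECT x FROM b
+ * both inputs are planned with resjunk flag columns (0 for the left, 1 for
+ * the right), appended with the left input first as EXCEPT requires, and a
+ * SetOp node filters the combined rows, either hashed or preceded by an
+ * explicit Sort.  For INTERSECT, the input with the fewer estimated groups
+ * is placed first to keep the hash table small.
+ */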
+
+/*
+ * Pull up children of a UNION node that are identically-propertied UNIONs.
+ *
+ * NOTE: we can also pull a UNION ALL up into a UNION, since the distinct
+ * output rows will be lost anyway.
+ *
+ * NOTE: currently, we ignore collations while determining if a child has
+ * the same properties. This is semantically sound only so long as all
+ * collations have the same notion of equality. It is valid from an
+ * implementation standpoint because we don't care about the ordering of
+ * a UNION child's result: UNION ALL results are always unordered, and
+ * generate_union_paths will force a fresh sort if the top level is a UNION.
+ */
+static List *
+plan_union_children(PlannerInfo *root,
+ SetOperationStmt *top_union,
+ List *refnames_tlist,
+ List **tlist_list)
+{
+ List *pending_rels = list_make1(top_union);
+ List *result = NIL;
+ List *child_tlist;
+
+ *tlist_list = NIL;
+
+ while (pending_rels != NIL)
+ {
+ Node *setOp = linitial(pending_rels);
+
+ pending_rels = list_delete_first(pending_rels);
+
+ if (IsA(setOp, SetOperationStmt))
+ {
+ SetOperationStmt *op = (SetOperationStmt *) setOp;
+
+ if (op->op == top_union->op &&
+ (op->all == top_union->all || op->all) &&
+ equal(op->colTypes, top_union->colTypes))
+ {
+ /* Same UNION, so fold children into parent */
+ pending_rels = lcons(op->rarg, pending_rels);
+ pending_rels = lcons(op->larg, pending_rels);
+ continue;
+ }
+ }
+
+ /*
+ * Not same, so plan this child separately.
+ *
+ * Note we disallow any resjunk columns in child results. This is
+ * necessary since the Append node that implements the union won't do
+ * any projection, and upper levels will get confused if some of our
+ * output tuples have junk and some don't. This case only arises when
+ * we have an EXCEPT or INTERSECT as child, else there won't be
+ * resjunk anyway.
+ */
+ result = lappend(result, recurse_set_operations(setOp, root,
+ top_union->colTypes,
+ top_union->colCollations,
+ false, -1,
+ refnames_tlist,
+ &child_tlist,
+ NULL));
+ *tlist_list = lappend(*tlist_list, child_tlist);
+ }
+
+ return result;
+}
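+
+/*
+ * For example (illustrative table names), in
+ *		(SELECT x FROM a UNION ALL SELECT x FROM b) UNION SELECT x FROM c
+ * the UNION ALL child is folded into the parent UNION, so all three leaf
+ * queries end up under one Append with a single unique-ification step.  A
+ * child that is an INTERSECT or EXCEPT, by contrast, fails the test above
+ * and is planned separately via recurse_set_operations().
+ */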
+
+/*
+ * Add nodes to the given path tree to unique-ify the result of a UNION.
+ */
+static Path *
+make_union_unique(SetOperationStmt *op, Path *path, List *tlist,
+ PlannerInfo *root)
+{
+ RelOptInfo *result_rel = fetch_upper_rel(root, UPPERREL_SETOP, NULL);
+ List *groupList;
+ double dNumGroups;
+
+ /* Identify the grouping semantics */
+ groupList = generate_setop_grouplist(op, tlist);
+
+ /*
+ * XXX for the moment, take the number of distinct groups as equal to the
+ * total input size, ie, the worst case. This is too conservative, but
+ * it's not clear how to get a decent estimate of the true size. One
+ * should note as well the propensity of novices to write UNION rather
+ * than UNION ALL even when they don't expect any duplicates...
+ */
+ dNumGroups = path->rows;
+
+ /* Decide whether to hash or sort */
+ if (choose_hashed_setop(root, groupList, path,
+ dNumGroups, dNumGroups,
+ "UNION"))
+ {
+ /* Hashed aggregate plan --- no sort needed */
+ path = (Path *) create_agg_path(root,
+ result_rel,
+ path,
+ create_pathtarget(root, tlist),
+ AGG_HASHED,
+ AGGSPLIT_SIMPLE,
+ groupList,
+ NIL,
+ NULL,
+ dNumGroups);
+ }
+ else
+ {
+ /* Sort and Unique */
+ if (groupList)
+ path = (Path *)
+ create_sort_path(root,
+ result_rel,
+ path,
+ make_pathkeys_for_sortclauses(root,
+ groupList,
+ tlist),
+ -1.0);
+ path = (Path *) create_upper_unique_path(root,
+ result_rel,
+ path,
+ list_length(path->pathkeys),
+ dNumGroups);
+ }
+
+ return path;
+}
+
+/*
+ * postprocess_setop_rel - perform steps required after adding paths
+ */
+static void
+postprocess_setop_rel(PlannerInfo *root, RelOptInfo *rel)
+{
+ /*
+ * We don't currently worry about allowing FDWs to contribute paths to
+ * this relation, but give extensions a chance.
+ */
+ if (create_upper_paths_hook)
+ (*create_upper_paths_hook) (root, UPPERREL_SETOP,
+ NULL, rel, NULL);
+
+ /* Select cheapest path */
+ set_cheapest(rel);
+}
+
+/*
+ * choose_hashed_setop - should we use hashing for a set operation?
+ */
+static bool
+choose_hashed_setop(PlannerInfo *root, List *groupClauses,
+ Path *input_path,
+ double dNumGroups, double dNumOutputRows,
+ const char *construct)
+{
+ int numGroupCols = list_length(groupClauses);
+ Size hash_mem_limit = get_hash_memory_limit();
+ bool can_sort;
+ bool can_hash;
+ Size hashentrysize;
+ Path hashed_p;
+ Path sorted_p;
+ double tuple_fraction;
+
+ /* Check whether the operators support sorting or hashing */
+ can_sort = grouping_is_sortable(groupClauses);
+ can_hash = grouping_is_hashable(groupClauses);
+ if (can_hash && can_sort)
+ {
+ /* we have a meaningful choice to make, continue ... */
+ }
+ else if (can_hash)
+ return true;
+ else if (can_sort)
+ return false;
+ else
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ /* translator: %s is UNION, INTERSECT, or EXCEPT */
+ errmsg("could not implement %s", construct),
+ errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
+
+ /* Prefer sorting when enable_hashagg is off */
+ if (!enable_hashagg)
+ return false;
+
+ /*
+ * Don't do it if it doesn't look like the hashtable will fit into
+ * hash_mem.
+ */
+ hashentrysize = MAXALIGN(input_path->pathtarget->width) + MAXALIGN(SizeofMinimalTupleHeader);
+
+ if (hashentrysize * dNumGroups > hash_mem_limit)
+ return false;
+
+ /*
+ * See if the estimated cost is no more than doing it the other way.
+ *
+ * We need to consider input_plan + hashagg versus input_plan + sort +
+ * group. Note that the actual result plan might involve a SetOp or
+ * Unique node, not Agg or Group, but the cost estimates for Agg and Group
+ * should be close enough for our purposes here.
+ *
+ * These path variables are dummies that just hold cost fields; we don't
+ * make actual Paths for these steps.
+ */
+ cost_agg(&hashed_p, root, AGG_HASHED, NULL,
+ numGroupCols, dNumGroups,
+ NIL,
+ input_path->startup_cost, input_path->total_cost,
+ input_path->rows, input_path->pathtarget->width);
+
+ /*
+ * Now for the sorted case. Note that the input is *always* unsorted,
+ * since it was made by appending unrelated sub-relations together.
+ */
+ sorted_p.startup_cost = input_path->startup_cost;
+ sorted_p.total_cost = input_path->total_cost;
+ /* XXX cost_sort doesn't actually look at pathkeys, so just pass NIL */
+ cost_sort(&sorted_p, root, NIL, sorted_p.total_cost,
+ input_path->rows, input_path->pathtarget->width,
+ 0.0, work_mem, -1.0);
+ cost_group(&sorted_p, root, numGroupCols, dNumGroups,
+ NIL,
+ sorted_p.startup_cost, sorted_p.total_cost,
+ input_path->rows);
+
+ /*
+ * Now make the decision using the top-level tuple fraction. First we
+ * have to convert an absolute count (LIMIT) into fractional form.
+ */
+ tuple_fraction = root->tuple_fraction;
+ if (tuple_fraction >= 1.0)
+ tuple_fraction /= dNumOutputRows;
+
+ if (compare_fractional_path_costs(&hashed_p, &sorted_p,
+ tuple_fraction) < 0)
+ {
+ /* Hashed is cheaper, so use it */
+ return true;
+ }
+ return false;
+}
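+
+/*
+ * For illustration only: with rows costing roughly 64 bytes per hashtable
+ * entry after MAXALIGN (data width plus minimal-tuple header overhead), an
+ * estimate of one million distinct groups implies roughly 64MB of hashtable,
+ * so hashing is rejected outright unless hash_mem (work_mem scaled by
+ * hash_mem_multiplier) is at least that large; otherwise the decision falls
+ * through to the cost comparison against sort-plus-group.
+ */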
+
+/*
+ * Generate targetlist for a set-operation plan node
+ *
+ * colTypes: OID list of set-op's result column datatypes
+ * colCollations: OID list of set-op's result column collations
+ * flag: -1 if no flag column needed, 0 or 1 to create a const flag column
+ * varno: varno to use in generated Vars
+ * hack_constants: true to copy up constants (see comments in code)
+ * input_tlist: targetlist of this node's input node
+ * refnames_tlist: targetlist to take column names from
+ */
+static List *
+generate_setop_tlist(List *colTypes, List *colCollations,
+ int flag,
+ Index varno,
+ bool hack_constants,
+ List *input_tlist,
+ List *refnames_tlist)
+{
+ List *tlist = NIL;
+ int resno = 1;
+ ListCell *ctlc,
+ *cclc,
+ *itlc,
+ *rtlc;
+ TargetEntry *tle;
+ Node *expr;
+
+ forfour(ctlc, colTypes, cclc, colCollations,
+ itlc, input_tlist, rtlc, refnames_tlist)
+ {
+ Oid colType = lfirst_oid(ctlc);
+ Oid colColl = lfirst_oid(cclc);
+ TargetEntry *inputtle = (TargetEntry *) lfirst(itlc);
+ TargetEntry *reftle = (TargetEntry *) lfirst(rtlc);
+
+ Assert(inputtle->resno == resno);
+ Assert(reftle->resno == resno);
+ Assert(!inputtle->resjunk);
+ Assert(!reftle->resjunk);
+
+ /*
+ * Generate columns referencing input columns and having appropriate
+ * data types and column names. Insert datatype coercions where
+ * necessary.
+ *
+ * HACK: constants in the input's targetlist are copied up as-is
+ * rather than being referenced as subquery outputs. This is mainly
+ * to ensure that when we try to coerce them to the output column's
+ * datatype, the right things happen for UNKNOWN constants. But do
+ * this only at the first level of subquery-scan plans; we don't want
+ * phony constants appearing in the output tlists of upper-level
+ * nodes!
+ */
+ if (hack_constants && inputtle->expr && IsA(inputtle->expr, Const))
+ expr = (Node *) inputtle->expr;
+ else
+ expr = (Node *) makeVar(varno,
+ inputtle->resno,
+ exprType((Node *) inputtle->expr),
+ exprTypmod((Node *) inputtle->expr),
+ exprCollation((Node *) inputtle->expr),
+ 0);
+
+ if (exprType(expr) != colType)
+ {
+ /*
+ * Note: it's not really cool to be applying coerce_to_common_type
+ * here; one notable point is that assign_expr_collations never
+ * gets run on any generated nodes. For the moment that's not a
+ * problem because we force the correct exposed collation below.
+ * It would likely be best to make the parser generate the correct
+ * output tlist for every set-op to begin with, though.
+ */
+ expr = coerce_to_common_type(NULL, /* no UNKNOWNs here */
+ expr,
+ colType,
+ "UNION/INTERSECT/EXCEPT");
+ }
+
+ /*
+ * Ensure the tlist entry's exposed collation matches the set-op. This
+ * is necessary because plan_set_operations() reports the result
+ * ordering as a list of SortGroupClauses, which don't carry collation
+ * themselves but just refer to tlist entries. If we don't show the
+ * right collation then planner.c might do the wrong thing in
+ * higher-level queries.
+ *
+ * Note we use RelabelType, not CollateExpr, since this expression
+ * will reach the executor without any further processing.
+ */
+ if (exprCollation(expr) != colColl)
+ expr = applyRelabelType(expr,
+ exprType(expr), exprTypmod(expr), colColl,
+ COERCE_IMPLICIT_CAST, -1, false);
+
+ tle = makeTargetEntry((Expr *) expr,
+ (AttrNumber) resno++,
+ pstrdup(reftle->resname),
+ false);
+
+ /*
+ * By convention, all non-resjunk columns in a setop tree have
+ * ressortgroupref equal to their resno. In some cases the ref isn't
+ * needed, but this is a cleaner way than modifying the tlist later.
+ */
+ tle->ressortgroupref = tle->resno;
+
+ tlist = lappend(tlist, tle);
+ }
+
+ if (flag >= 0)
+ {
+ /* Add a resjunk flag column */
+ /* flag value is the given constant */
+ expr = (Node *) makeConst(INT4OID,
+ -1,
+ InvalidOid,
+ sizeof(int32),
+ Int32GetDatum(flag),
+ false,
+ true);
+ tle = makeTargetEntry((Expr *) expr,
+ (AttrNumber) resno++,
+ pstrdup("flag"),
+ true);
+ tlist = lappend(tlist, tle);
+ }
+
+ return tlist;
+}
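+
+/*
+ * For example, in
+ *		SELECT 'abc' UNION SELECT textcol FROM t
+ * (names illustrative), the set-op result column is of type text, so the
+ * untyped literal 'abc' is copied up as a constant (the hack_constants case)
+ * and coerced to text here, rather than being referenced through a Var of
+ * the unknown pseudo-type.  A RelabelType is added afterwards whenever the
+ * expression's exposed collation still differs from the column's collation.
+ */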
+
+/*
+ * Generate targetlist for a set-operation Append node
+ *
+ * colTypes: OID list of set-op's result column datatypes
+ * colCollations: OID list of set-op's result column collations
+ * flag: true to create a flag column copied up from subplans
+ * input_tlists: list of tlists for sub-plans of the Append
+ * refnames_tlist: targetlist to take column names from
+ *
+ * The entries in the Append's targetlist should always be simple Vars;
+ * we just have to make sure they have the right datatypes/typmods/collations.
+ * The Vars are always generated with varno 0.
+ *
+ * XXX a problem with the varno-zero approach is that set_pathtarget_cost_width
+ * cannot figure out a realistic width for the tlist we make here. But we
+ * ought to refactor this code to produce a PathTarget directly, anyway.
+ */
+static List *
+generate_append_tlist(List *colTypes, List *colCollations,
+ bool flag,
+ List *input_tlists,
+ List *refnames_tlist)
+{
+ List *tlist = NIL;
+ int resno = 1;
+ ListCell *curColType;
+ ListCell *curColCollation;
+ ListCell *ref_tl_item;
+ int colindex;
+ TargetEntry *tle;
+ Node *expr;
+ ListCell *tlistl;
+ int32 *colTypmods;
+
+ /*
+ * First extract typmods to use.
+ *
+ * If the inputs all agree on type and typmod of a particular column, use
+ * that typmod; else use -1.
+ */
+ colTypmods = (int32 *) palloc(list_length(colTypes) * sizeof(int32));
+
+ foreach(tlistl, input_tlists)
+ {
+ List *subtlist = (List *) lfirst(tlistl);
+ ListCell *subtlistl;
+
+ curColType = list_head(colTypes);
+ colindex = 0;
+ foreach(subtlistl, subtlist)
+ {
+ TargetEntry *subtle = (TargetEntry *) lfirst(subtlistl);
+
+ if (subtle->resjunk)
+ continue;
+ Assert(curColType != NULL);
+ if (exprType((Node *) subtle->expr) == lfirst_oid(curColType))
+ {
+ /* If first subplan, copy the typmod; else compare */
+ int32 subtypmod = exprTypmod((Node *) subtle->expr);
+
+ if (tlistl == list_head(input_tlists))
+ colTypmods[colindex] = subtypmod;
+ else if (subtypmod != colTypmods[colindex])
+ colTypmods[colindex] = -1;
+ }
+ else
+ {
+ /* types disagree, so force typmod to -1 */
+ colTypmods[colindex] = -1;
+ }
+ curColType = lnext(colTypes, curColType);
+ colindex++;
+ }
+ Assert(curColType == NULL);
+ }
+
+ /*
+ * Now we can build the tlist for the Append.
+ */
+ colindex = 0;
+ forthree(curColType, colTypes, curColCollation, colCollations,
+ ref_tl_item, refnames_tlist)
+ {
+ Oid colType = lfirst_oid(curColType);
+ int32 colTypmod = colTypmods[colindex++];
+ Oid colColl = lfirst_oid(curColCollation);
+ TargetEntry *reftle = (TargetEntry *) lfirst(ref_tl_item);
+
+ Assert(reftle->resno == resno);
+ Assert(!reftle->resjunk);
+ expr = (Node *) makeVar(0,
+ resno,
+ colType,
+ colTypmod,
+ colColl,
+ 0);
+ tle = makeTargetEntry((Expr *) expr,
+ (AttrNumber) resno++,
+ pstrdup(reftle->resname),
+ false);
+
+ /*
+ * By convention, all non-resjunk columns in a setop tree have
+ * ressortgroupref equal to their resno. In some cases the ref isn't
+ * needed, but this is a cleaner way than modifying the tlist later.
+ */
+ tle->ressortgroupref = tle->resno;
+
+ tlist = lappend(tlist, tle);
+ }
+
+ if (flag)
+ {
+ /* Add a resjunk flag column */
+ /* flag value is shown as copied up from subplan */
+ expr = (Node *) makeVar(0,
+ resno,
+ INT4OID,
+ -1,
+ InvalidOid,
+ 0);
+ tle = makeTargetEntry((Expr *) expr,
+ (AttrNumber) resno++,
+ pstrdup("flag"),
+ true);
+ tlist = lappend(tlist, tle);
+ }
+
+ pfree(colTypmods);
+
+ return tlist;
+}
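+
+/*
+ * For example, if every input produces varchar(20) for a given column, the
+ * Append tlist Var carries the varchar(20) typmod; if one input produces
+ * varchar(10) and another varchar(20), or their types differ outright, the
+ * typmod falls back to -1 (unconstrained).
+ */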
+
+/*
+ * generate_setop_grouplist
+ * Build a SortGroupClause list defining the sort/grouping properties
+ * of the setop's output columns.
+ *
+ * Parse analysis already determined the properties and built a suitable
+ * list, except that the entries do not have sortgrouprefs set because
+ * the parser output representation doesn't include a tlist for each
+ * setop. So what we need to do here is copy that list and install
+ * proper sortgrouprefs into it (copying those from the targetlist).
+ */
+static List *
+generate_setop_grouplist(SetOperationStmt *op, List *targetlist)
+{
+ List *grouplist = copyObject(op->groupClauses);
+ ListCell *lg;
+ ListCell *lt;
+
+ lg = list_head(grouplist);
+ foreach(lt, targetlist)
+ {
+ TargetEntry *tle = (TargetEntry *) lfirst(lt);
+ SortGroupClause *sgc;
+
+ if (tle->resjunk)
+ {
+ /* resjunk columns should not have sortgrouprefs */
+ Assert(tle->ressortgroupref == 0);
+ continue; /* ignore resjunk columns */
+ }
+
+ /* non-resjunk columns should have sortgroupref = resno */
+ Assert(tle->ressortgroupref == tle->resno);
+
+ /* non-resjunk columns should have grouping clauses */
+ Assert(lg != NULL);
+ sgc = (SortGroupClause *) lfirst(lg);
+ lg = lnext(grouplist, lg);
+ Assert(sgc->tleSortGroupRef == 0);
+
+ sgc->tleSortGroupRef = tle->ressortgroupref;
+ }
+ Assert(lg == NULL);
+ return grouplist;
+}
diff --git a/src/backend/optimizer/util/Makefile b/src/backend/optimizer/util/Makefile
new file mode 100644
index 0000000..4fb115c
--- /dev/null
+++ b/src/backend/optimizer/util/Makefile
@@ -0,0 +1,31 @@
+#-------------------------------------------------------------------------
+#
+# Makefile--
+# Makefile for optimizer/util
+#
+# IDENTIFICATION
+# src/backend/optimizer/util/Makefile
+#
+#-------------------------------------------------------------------------
+
+subdir = src/backend/optimizer/util
+top_builddir = ../../../..
+include $(top_builddir)/src/Makefile.global
+
+OBJS = \
+ appendinfo.o \
+ clauses.o \
+ inherit.o \
+ joininfo.o \
+ orclauses.o \
+ paramassign.o \
+ pathnode.o \
+ placeholder.o \
+ plancat.o \
+ predtest.o \
+ relnode.o \
+ restrictinfo.o \
+ tlist.o \
+ var.o
+
+include $(top_srcdir)/src/backend/common.mk
diff --git a/src/backend/optimizer/util/appendinfo.c b/src/backend/optimizer/util/appendinfo.c
new file mode 100644
index 0000000..5c3d5a7
--- /dev/null
+++ b/src/backend/optimizer/util/appendinfo.c
@@ -0,0 +1,1011 @@
+/*-------------------------------------------------------------------------
+ *
+ * appendinfo.c
+ * Routines for mapping between append parent(s) and children
+ *
+ * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ * src/backend/optimizer/util/appendinfo.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "access/htup_details.h"
+#include "access/table.h"
+#include "foreign/fdwapi.h"
+#include "nodes/makefuncs.h"
+#include "nodes/nodeFuncs.h"
+#include "optimizer/appendinfo.h"
+#include "optimizer/pathnode.h"
+#include "optimizer/planmain.h"
+#include "parser/parsetree.h"
+#include "utils/lsyscache.h"
+#include "utils/rel.h"
+#include "utils/syscache.h"
+
+
+typedef struct
+{
+ PlannerInfo *root;
+ int nappinfos;
+ AppendRelInfo **appinfos;
+} adjust_appendrel_attrs_context;
+
+static void make_inh_translation_list(Relation oldrelation,
+ Relation newrelation,
+ Index newvarno,
+ AppendRelInfo *appinfo);
+static Node *adjust_appendrel_attrs_mutator(Node *node,
+ adjust_appendrel_attrs_context *context);
+
+
+/*
+ * make_append_rel_info
+ * Build an AppendRelInfo for the parent-child pair
+ */
+AppendRelInfo *
+make_append_rel_info(Relation parentrel, Relation childrel,
+ Index parentRTindex, Index childRTindex)
+{
+ AppendRelInfo *appinfo = makeNode(AppendRelInfo);
+
+ appinfo->parent_relid = parentRTindex;
+ appinfo->child_relid = childRTindex;
+ appinfo->parent_reltype = parentrel->rd_rel->reltype;
+ appinfo->child_reltype = childrel->rd_rel->reltype;
+ make_inh_translation_list(parentrel, childrel, childRTindex, appinfo);
+ appinfo->parent_reloid = RelationGetRelid(parentrel);
+
+ return appinfo;
+}
+
+/*
+ * make_inh_translation_list
+ * Build the list of translations from parent Vars to child Vars for
+ * an inheritance child, as well as a reverse-translation array.
+ *
+ * The reverse-translation array has an entry for each child relation
+ * column, which is either the 1-based index of the corresponding parent
+ * column, or 0 if there's no match (that happens for dropped child columns,
+ * as well as child columns beyond those of the parent, which are allowed in
+ * traditional inheritance though not partitioning).
+ *
+ * For paranoia's sake, we match type/collation as well as attribute name.
+ */
+static void
+make_inh_translation_list(Relation oldrelation, Relation newrelation,
+ Index newvarno,
+ AppendRelInfo *appinfo)
+{
+ List *vars = NIL;
+ AttrNumber *pcolnos;
+ TupleDesc old_tupdesc = RelationGetDescr(oldrelation);
+ TupleDesc new_tupdesc = RelationGetDescr(newrelation);
+ Oid new_relid = RelationGetRelid(newrelation);
+ int oldnatts = old_tupdesc->natts;
+ int newnatts = new_tupdesc->natts;
+ int old_attno;
+ int new_attno = 0;
+
+ /* Initialize reverse-translation array with all entries zero */
+ appinfo->num_child_cols = newnatts;
+ appinfo->parent_colnos = pcolnos =
+ (AttrNumber *) palloc0(newnatts * sizeof(AttrNumber));
+
+ for (old_attno = 0; old_attno < oldnatts; old_attno++)
+ {
+ Form_pg_attribute att;
+ char *attname;
+ Oid atttypid;
+ int32 atttypmod;
+ Oid attcollation;
+
+ att = TupleDescAttr(old_tupdesc, old_attno);
+ if (att->attisdropped)
+ {
+ /* Just put NULL into this list entry */
+ vars = lappend(vars, NULL);
+ continue;
+ }
+ attname = NameStr(att->attname);
+ atttypid = att->atttypid;
+ atttypmod = att->atttypmod;
+ attcollation = att->attcollation;
+
+ /*
+ * When we are generating the "translation list" for the parent table
+ * of an inheritance set, no need to search for matches.
+ */
+ if (oldrelation == newrelation)
+ {
+ vars = lappend(vars, makeVar(newvarno,
+ (AttrNumber) (old_attno + 1),
+ atttypid,
+ atttypmod,
+ attcollation,
+ 0));
+ pcolnos[old_attno] = old_attno + 1;
+ continue;
+ }
+
+ /*
+ * Otherwise we have to search for the matching column by name.
+ * There's no guarantee it'll have the same column position, because
+ * of cases like ALTER TABLE ADD COLUMN and multiple inheritance.
+ * However, in simple cases, the relative order of columns is mostly
+ * the same in both relations, so try the column of newrelation that
+ * follows immediately after the one that we just found, and if that
+ * fails, let syscache handle it.
+ */
+ if (new_attno >= newnatts ||
+ (att = TupleDescAttr(new_tupdesc, new_attno))->attisdropped ||
+ strcmp(attname, NameStr(att->attname)) != 0)
+ {
+ HeapTuple newtup;
+
+ newtup = SearchSysCacheAttName(new_relid, attname);
+ if (!HeapTupleIsValid(newtup))
+ elog(ERROR, "could not find inherited attribute \"%s\" of relation \"%s\"",
+ attname, RelationGetRelationName(newrelation));
+ new_attno = ((Form_pg_attribute) GETSTRUCT(newtup))->attnum - 1;
+ Assert(new_attno >= 0 && new_attno < newnatts);
+ ReleaseSysCache(newtup);
+
+ att = TupleDescAttr(new_tupdesc, new_attno);
+ }
+
+ /* Found it, check type and collation match */
+ if (atttypid != att->atttypid || atttypmod != att->atttypmod)
+ elog(ERROR, "attribute \"%s\" of relation \"%s\" does not match parent's type",
+ attname, RelationGetRelationName(newrelation));
+ if (attcollation != att->attcollation)
+ elog(ERROR, "attribute \"%s\" of relation \"%s\" does not match parent's collation",
+ attname, RelationGetRelationName(newrelation));
+
+ vars = lappend(vars, makeVar(newvarno,
+ (AttrNumber) (new_attno + 1),
+ atttypid,
+ atttypmod,
+ attcollation,
+ 0));
+ pcolnos[new_attno] = old_attno + 1;
+ new_attno++;
+ }
+
+ appinfo->translated_vars = vars;
+}
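+
+/*
+ * For example (hypothetical tables), with a parent p(a int, b text) and a
+ * traditional-inheritance child whose physical columns are
+ *		a int, <dropped>, b text, extra int
+ * the translation list holds Vars for child attnos 1 and 3 (matched by name,
+ * type, and collation), while parent_colnos ends up as {1, 0, 2, 0}: the
+ * dropped column and the child-only "extra" column have no parent
+ * counterpart.
+ */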
+
+/*
+ * adjust_appendrel_attrs
+ * Copy the specified query or expression and translate Vars referring to a
+ * parent rel to refer to the corresponding child rel instead. We also
+ * update rtindexes appearing outside Vars, such as resultRelation and
+ * jointree relids.
+ *
+ * Note: this is only applied after conversion of sublinks to subplans,
+ * so we don't need to cope with recursion into sub-queries.
+ *
+ * Note: this is not hugely different from what pullup_replace_vars() does;
+ * maybe we should try to fold the two routines together.
+ */
+Node *
+adjust_appendrel_attrs(PlannerInfo *root, Node *node, int nappinfos,
+ AppendRelInfo **appinfos)
+{
+ adjust_appendrel_attrs_context context;
+
+ context.root = root;
+ context.nappinfos = nappinfos;
+ context.appinfos = appinfos;
+
+ /* If there's nothing to adjust, don't call this function. */
+ Assert(nappinfos >= 1 && appinfos != NULL);
+
+ /* Should never be translating a Query tree. */
+ Assert(node == NULL || !IsA(node, Query));
+
+ return adjust_appendrel_attrs_mutator(node, &context);
+}
+
+static Node *
+adjust_appendrel_attrs_mutator(Node *node,
+ adjust_appendrel_attrs_context *context)
+{
+ AppendRelInfo **appinfos = context->appinfos;
+ int nappinfos = context->nappinfos;
+ int cnt;
+
+ if (node == NULL)
+ return NULL;
+ if (IsA(node, Var))
+ {
+ Var *var = (Var *) copyObject(node);
+ AppendRelInfo *appinfo = NULL;
+
+ if (var->varlevelsup != 0)
+ return (Node *) var; /* no changes needed */
+
+ for (cnt = 0; cnt < nappinfos; cnt++)
+ {
+ if (var->varno == appinfos[cnt]->parent_relid)
+ {
+ appinfo = appinfos[cnt];
+ break;
+ }
+ }
+
+ if (appinfo)
+ {
+ var->varno = appinfo->child_relid;
+ /* it's now a generated Var, so drop any syntactic labeling */
+ var->varnosyn = 0;
+ var->varattnosyn = 0;
+ if (var->varattno > 0)
+ {
+ Node *newnode;
+
+ if (var->varattno > list_length(appinfo->translated_vars))
+ elog(ERROR, "attribute %d of relation \"%s\" does not exist",
+ var->varattno, get_rel_name(appinfo->parent_reloid));
+ newnode = copyObject(list_nth(appinfo->translated_vars,
+ var->varattno - 1));
+ if (newnode == NULL)
+ elog(ERROR, "attribute %d of relation \"%s\" does not exist",
+ var->varattno, get_rel_name(appinfo->parent_reloid));
+ return newnode;
+ }
+ else if (var->varattno == 0)
+ {
+ /*
+ * Whole-row Var: if we are dealing with named rowtypes, we
+ * can use a whole-row Var for the child table plus a coercion
+ * step to convert the tuple layout to the parent's rowtype.
+ * Otherwise we have to generate a RowExpr.
+ */
+ if (OidIsValid(appinfo->child_reltype))
+ {
+ Assert(var->vartype == appinfo->parent_reltype);
+ if (appinfo->parent_reltype != appinfo->child_reltype)
+ {
+ ConvertRowtypeExpr *r = makeNode(ConvertRowtypeExpr);
+
+ r->arg = (Expr *) var;
+ r->resulttype = appinfo->parent_reltype;
+ r->convertformat = COERCE_IMPLICIT_CAST;
+ r->location = -1;
+ /* Make sure the Var node has the right type ID, too */
+ var->vartype = appinfo->child_reltype;
+ return (Node *) r;
+ }
+ }
+ else
+ {
+ /*
+ * Build a RowExpr containing the translated variables.
+ *
+ * In practice var->vartype will always be RECORDOID here,
+ * so we need to come up with some suitable column names.
+ * We use the parent RTE's column names.
+ *
+ * Note: we can't get here for inheritance cases, so there
+ * is no need to worry that translated_vars might contain
+ * some dummy NULLs.
+ */
+ RowExpr *rowexpr;
+ List *fields;
+ RangeTblEntry *rte;
+
+ rte = rt_fetch(appinfo->parent_relid,
+ context->root->parse->rtable);
+ fields = copyObject(appinfo->translated_vars);
+ rowexpr = makeNode(RowExpr);
+ rowexpr->args = fields;
+ rowexpr->row_typeid = var->vartype;
+ rowexpr->row_format = COERCE_IMPLICIT_CAST;
+ rowexpr->colnames = copyObject(rte->eref->colnames);
+ rowexpr->location = -1;
+
+ return (Node *) rowexpr;
+ }
+ }
+ /* system attributes don't need any other translation */
+ }
+ else if (var->varno == ROWID_VAR)
+ {
+ /*
+ * If it's a ROWID_VAR placeholder, see if we've reached a leaf
+ * target rel, for which we can translate the Var to a specific
+ * instantiation. We should never be asked to translate to a set
+ * of relids containing more than one leaf target rel, so the
+ * answer will be unique. If we're still considering non-leaf
+ * inheritance levels, return the ROWID_VAR Var as-is.
+ */
+ Relids leaf_result_relids = context->root->leaf_result_relids;
+ Index leaf_relid = 0;
+
+ for (cnt = 0; cnt < nappinfos; cnt++)
+ {
+ if (bms_is_member(appinfos[cnt]->child_relid,
+ leaf_result_relids))
+ {
+ if (leaf_relid)
+ elog(ERROR, "cannot translate to multiple leaf relids");
+ leaf_relid = appinfos[cnt]->child_relid;
+ }
+ }
+
+ if (leaf_relid)
+ {
+ RowIdentityVarInfo *ridinfo = (RowIdentityVarInfo *)
+ list_nth(context->root->row_identity_vars, var->varattno - 1);
+
+ if (bms_is_member(leaf_relid, ridinfo->rowidrels))
+ {
+ /* Substitute the Var given in the RowIdentityVarInfo */
+ var = copyObject(ridinfo->rowidvar);
+ /* ... but use the correct relid */
+ var->varno = leaf_relid;
+ /* varnosyn in the RowIdentityVarInfo is probably wrong */
+ var->varnosyn = 0;
+ var->varattnosyn = 0;
+ }
+ else
+ {
+ /*
+ * This leaf rel can't return the desired value, so
+ * substitute a NULL of the correct type.
+ */
+ return (Node *) makeNullConst(var->vartype,
+ var->vartypmod,
+ var->varcollid);
+ }
+ }
+ }
+ return (Node *) var;
+ }
+ if (IsA(node, CurrentOfExpr))
+ {
+ CurrentOfExpr *cexpr = (CurrentOfExpr *) copyObject(node);
+
+ for (cnt = 0; cnt < nappinfos; cnt++)
+ {
+ AppendRelInfo *appinfo = appinfos[cnt];
+
+ if (cexpr->cvarno == appinfo->parent_relid)
+ {
+ cexpr->cvarno = appinfo->child_relid;
+ break;
+ }
+ }
+ return (Node *) cexpr;
+ }
+ if (IsA(node, PlaceHolderVar))
+ {
+ /* Copy the PlaceHolderVar node with correct mutation of subnodes */
+ PlaceHolderVar *phv;
+
+ phv = (PlaceHolderVar *) expression_tree_mutator(node,
+ adjust_appendrel_attrs_mutator,
+ (void *) context);
+ /* now fix PlaceHolderVar's relid sets */
+ if (phv->phlevelsup == 0)
+ phv->phrels = adjust_child_relids(phv->phrels, context->nappinfos,
+ context->appinfos);
+ return (Node *) phv;
+ }
+ /* Shouldn't need to handle planner auxiliary nodes here */
+ Assert(!IsA(node, SpecialJoinInfo));
+ Assert(!IsA(node, AppendRelInfo));
+ Assert(!IsA(node, PlaceHolderInfo));
+ Assert(!IsA(node, MinMaxAggInfo));
+
+ /*
+ * We have to process RestrictInfo nodes specially. (Note: although
+ * set_append_rel_pathlist will hide RestrictInfos in the parent's
+ * baserestrictinfo list from us, it doesn't hide those in joininfo.)
+ */
+ if (IsA(node, RestrictInfo))
+ {
+ RestrictInfo *oldinfo = (RestrictInfo *) node;
+ RestrictInfo *newinfo = makeNode(RestrictInfo);
+
+ /* Copy all flat-copiable fields */
+ memcpy(newinfo, oldinfo, sizeof(RestrictInfo));
+
+ /* Recursively fix the clause itself */
+ newinfo->clause = (Expr *)
+ adjust_appendrel_attrs_mutator((Node *) oldinfo->clause, context);
+
+ /* and the modified version, if an OR clause */
+ newinfo->orclause = (Expr *)
+ adjust_appendrel_attrs_mutator((Node *) oldinfo->orclause, context);
+
+ /* adjust relid sets too */
+ newinfo->clause_relids = adjust_child_relids(oldinfo->clause_relids,
+ context->nappinfos,
+ context->appinfos);
+ newinfo->required_relids = adjust_child_relids(oldinfo->required_relids,
+ context->nappinfos,
+ context->appinfos);
+ newinfo->outer_relids = adjust_child_relids(oldinfo->outer_relids,
+ context->nappinfos,
+ context->appinfos);
+ newinfo->nullable_relids = adjust_child_relids(oldinfo->nullable_relids,
+ context->nappinfos,
+ context->appinfos);
+ newinfo->left_relids = adjust_child_relids(oldinfo->left_relids,
+ context->nappinfos,
+ context->appinfos);
+ newinfo->right_relids = adjust_child_relids(oldinfo->right_relids,
+ context->nappinfos,
+ context->appinfos);
+
+ /*
+ * Reset cached derivative fields, since these might need to have
+ * different values when considering the child relation. Note we
+ * don't reset left_ec/right_ec: each child variable is implicitly
+ * equivalent to its parent, so still a member of the same EC if any.
+ */
+ newinfo->eval_cost.startup = -1;
+ newinfo->norm_selec = -1;
+ newinfo->outer_selec = -1;
+ newinfo->left_em = NULL;
+ newinfo->right_em = NULL;
+ newinfo->scansel_cache = NIL;
+ newinfo->left_bucketsize = -1;
+ newinfo->right_bucketsize = -1;
+ newinfo->left_mcvfreq = -1;
+ newinfo->right_mcvfreq = -1;
+
+ return (Node *) newinfo;
+ }
+
+ /*
+ * NOTE: we do not need to recurse into sublinks, because they should
+ * already have been converted to subplans before we see them.
+ */
+ Assert(!IsA(node, SubLink));
+ Assert(!IsA(node, Query));
+ /* We should never see these Query substructures, either. */
+ Assert(!IsA(node, RangeTblRef));
+ Assert(!IsA(node, JoinExpr));
+
+ return expression_tree_mutator(node, adjust_appendrel_attrs_mutator,
+ (void *) context);
+}
+
+/*
+ * adjust_appendrel_attrs_multilevel
+ * Apply Var translations from a toplevel appendrel parent down to a child.
+ *
+ * In some cases we need to translate expressions referencing a parent relation
+ * to reference an appendrel child that's multiple levels removed from it.
+ */
+Node *
+adjust_appendrel_attrs_multilevel(PlannerInfo *root, Node *node,
+ Relids child_relids,
+ Relids top_parent_relids)
+{
+ AppendRelInfo **appinfos;
+ Bitmapset *parent_relids = NULL;
+ int nappinfos;
+ int cnt;
+
+ Assert(bms_num_members(child_relids) == bms_num_members(top_parent_relids));
+
+ appinfos = find_appinfos_by_relids(root, child_relids, &nappinfos);
+
+ /* Construct relids set for the immediate parent of given child. */
+ for (cnt = 0; cnt < nappinfos; cnt++)
+ {
+ AppendRelInfo *appinfo = appinfos[cnt];
+
+ parent_relids = bms_add_member(parent_relids, appinfo->parent_relid);
+ }
+
+ /* Recurse if immediate parent is not the top parent. */
+ if (!bms_equal(parent_relids, top_parent_relids))
+ node = adjust_appendrel_attrs_multilevel(root, node, parent_relids,
+ top_parent_relids);
+
+ /* Now translate for this child */
+ node = adjust_appendrel_attrs(root, node, nappinfos, appinfos);
+
+ pfree(appinfos);
+
+ return node;
+}
+
+/*
+ * Substitute child relids for parent relids in a Relid set. The array of
+ * appinfos specifies the substitutions to be performed.
+ */
+Relids
+adjust_child_relids(Relids relids, int nappinfos, AppendRelInfo **appinfos)
+{
+ Bitmapset *result = NULL;
+ int cnt;
+
+ for (cnt = 0; cnt < nappinfos; cnt++)
+ {
+ AppendRelInfo *appinfo = appinfos[cnt];
+
+ /* Remove parent, add child */
+ if (bms_is_member(appinfo->parent_relid, relids))
+ {
+ /* Make a copy if we are changing the set. */
+ if (!result)
+ result = bms_copy(relids);
+
+ result = bms_del_member(result, appinfo->parent_relid);
+ result = bms_add_member(result, appinfo->child_relid);
+ }
+ }
+
+ /* If we made any changes, return the modified copy. */
+ if (result)
+ return result;
+
+ /* Otherwise, return the original set without modification. */
+ return relids;
+}
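+
+/*
+ * For example, given an AppendRelInfo mapping parent relid 1 to child relid
+ * 4, the set {1, 3} is copied and rewritten to {4, 3}, while a set that does
+ * not mention relid 1 is returned unchanged without copying.
+ */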
+
+/*
+ * Replace any relid present in top_parent_relids with its child in
+ * child_relids. Members of child_relids can be multiple levels below top
+ * parent in the partition hierarchy.
+ */
+Relids
+adjust_child_relids_multilevel(PlannerInfo *root, Relids relids,
+ Relids child_relids, Relids top_parent_relids)
+{
+ AppendRelInfo **appinfos;
+ int nappinfos;
+ Relids parent_relids = NULL;
+ Relids result;
+ Relids tmp_result = NULL;
+ int cnt;
+
+ /*
+ * If the given relids set doesn't contain any of the top parent relids,
+ * it will remain unchanged.
+ */
+ if (!bms_overlap(relids, top_parent_relids))
+ return relids;
+
+ appinfos = find_appinfos_by_relids(root, child_relids, &nappinfos);
+
+ /* Construct relids set for the immediate parent of the given child. */
+ for (cnt = 0; cnt < nappinfos; cnt++)
+ {
+ AppendRelInfo *appinfo = appinfos[cnt];
+
+ parent_relids = bms_add_member(parent_relids, appinfo->parent_relid);
+ }
+
+ /* Recurse if immediate parent is not the top parent. */
+ if (!bms_equal(parent_relids, top_parent_relids))
+ {
+ tmp_result = adjust_child_relids_multilevel(root, relids,
+ parent_relids,
+ top_parent_relids);
+ relids = tmp_result;
+ }
+
+ result = adjust_child_relids(relids, nappinfos, appinfos);
+
+ /* Free memory consumed by any intermediate result. */
+ if (tmp_result)
+ bms_free(tmp_result);
+ bms_free(parent_relids);
+ pfree(appinfos);
+
+ return result;
+}
+
+/*
+ * adjust_inherited_attnums
+ * Translate an integer list of attribute numbers from parent to child.
+ */
+List *
+adjust_inherited_attnums(List *attnums, AppendRelInfo *context)
+{
+ List *result = NIL;
+ ListCell *lc;
+
+ /* This should only happen for an inheritance case, not UNION ALL */
+ Assert(OidIsValid(context->parent_reloid));
+
+ /* Look up each attribute in the AppendRelInfo's translated_vars list */
+ foreach(lc, attnums)
+ {
+ AttrNumber parentattno = lfirst_int(lc);
+ Var *childvar;
+
+ /* Look up the translation of this column: it must be a Var */
+ if (parentattno <= 0 ||
+ parentattno > list_length(context->translated_vars))
+ elog(ERROR, "attribute %d of relation \"%s\" does not exist",
+ parentattno, get_rel_name(context->parent_reloid));
+ childvar = (Var *) list_nth(context->translated_vars, parentattno - 1);
+ if (childvar == NULL || !IsA(childvar, Var))
+ elog(ERROR, "attribute %d of relation \"%s\" does not exist",
+ parentattno, get_rel_name(context->parent_reloid));
+
+ result = lappend_int(result, childvar->varattno);
+ }
+ return result;
+}
+
+/*
+ * adjust_inherited_attnums_multilevel
+ * As above, but traverse multiple inheritance levels as needed.
+ */
+List *
+adjust_inherited_attnums_multilevel(PlannerInfo *root, List *attnums,
+ Index child_relid, Index top_parent_relid)
+{
+ AppendRelInfo *appinfo = root->append_rel_array[child_relid];
+
+ if (!appinfo)
+ elog(ERROR, "child rel %d not found in append_rel_array", child_relid);
+
+ /* Recurse if immediate parent is not the top parent. */
+ if (appinfo->parent_relid != top_parent_relid)
+ attnums = adjust_inherited_attnums_multilevel(root, attnums,
+ appinfo->parent_relid,
+ top_parent_relid);
+
+ /* Now translate for this child */
+ return adjust_inherited_attnums(attnums, appinfo);
+}
+
+/*
+ * get_translated_update_targetlist
+ * Get the processed_tlist of an UPDATE query, translated as needed to
+ * match a child target relation.
+ *
+ * Optionally also return the list of target column numbers translated
+ * to this target relation. (The resnos in processed_tlist MUST NOT be
+ * relied on for this purpose.)
+ */
+void
+get_translated_update_targetlist(PlannerInfo *root, Index relid,
+ List **processed_tlist, List **update_colnos)
+{
+ /* This is pretty meaningless for commands other than UPDATE. */
+ Assert(root->parse->commandType == CMD_UPDATE);
+ if (relid == root->parse->resultRelation)
+ {
+ /*
+ * Non-inheritance case, so it's easy. The caller might be expecting
+ * a tree it can scribble on, though, so copy.
+ */
+ *processed_tlist = copyObject(root->processed_tlist);
+ if (update_colnos)
+ *update_colnos = copyObject(root->update_colnos);
+ }
+ else
+ {
+ Assert(bms_is_member(relid, root->all_result_relids));
+ *processed_tlist = (List *)
+ adjust_appendrel_attrs_multilevel(root,
+ (Node *) root->processed_tlist,
+ bms_make_singleton(relid),
+ bms_make_singleton(root->parse->resultRelation));
+ if (update_colnos)
+ *update_colnos =
+ adjust_inherited_attnums_multilevel(root, root->update_colnos,
+ relid,
+ root->parse->resultRelation);
+ }
+}
+
+/*
+ * find_appinfos_by_relids
+ * Find AppendRelInfo structures for all relations specified by relids.
+ *
+ * The AppendRelInfos are returned in an array, which can be pfree'd by the
+ * caller. *nappinfos is set to the number of entries in the array.
+ */
+AppendRelInfo **
+find_appinfos_by_relids(PlannerInfo *root, Relids relids, int *nappinfos)
+{
+ AppendRelInfo **appinfos;
+ int cnt = 0;
+ int i;
+
+ *nappinfos = bms_num_members(relids);
+ appinfos = (AppendRelInfo **) palloc(sizeof(AppendRelInfo *) * *nappinfos);
+
+ i = -1;
+ while ((i = bms_next_member(relids, i)) >= 0)
+ {
+ AppendRelInfo *appinfo = root->append_rel_array[i];
+
+ if (!appinfo)
+ elog(ERROR, "child rel %d not found in append_rel_array", i);
+
+ appinfos[cnt++] = appinfo;
+ }
+ return appinfos;
+}
+
+
+/*****************************************************************************
+ *
+ * ROW-IDENTITY VARIABLE MANAGEMENT
+ *
+ * This code lacks a good home, perhaps. We choose to keep it here because
+ * adjust_appendrel_attrs_mutator() is its principal co-conspirator. That
+ * function does most of what is needed to expand ROWID_VAR Vars into the
+ * right things.
+ *
+ *****************************************************************************/
+
+/*
+ * add_row_identity_var
+ * Register a row-identity column to be used in UPDATE/DELETE/MERGE.
+ *
+ * The Var must be equal(), aside from varno, to any other row-identity
+ * column with the same rowid_name. Thus, for example, "wholerow"
+ * row identities had better use vartype == RECORDOID.
+ *
+ * rtindex is currently redundant with rowid_var->varno, but we specify
+ * it as a separate parameter in case this is ever generalized to support
+ * non-Var expressions. (We could reasonably handle expressions over
+ * Vars of the specified rtindex, but for now that seems unnecessary.)
+ */
+void
+add_row_identity_var(PlannerInfo *root, Var *orig_var,
+ Index rtindex, const char *rowid_name)
+{
+ TargetEntry *tle;
+ Var *rowid_var;
+ RowIdentityVarInfo *ridinfo;
+ ListCell *lc;
+
+ /* For now, the argument must be just a Var of the given rtindex */
+ Assert(IsA(orig_var, Var));
+ Assert(orig_var->varno == rtindex);
+ Assert(orig_var->varlevelsup == 0);
+
+ /*
+ * If we're doing non-inherited UPDATE/DELETE/MERGE, there's little need
+ * for ROWID_VAR shenanigans. Just shove the presented Var into the
+ * processed_tlist, and we're done.
+ */
+ if (rtindex == root->parse->resultRelation)
+ {
+ tle = makeTargetEntry((Expr *) orig_var,
+ list_length(root->processed_tlist) + 1,
+ pstrdup(rowid_name),
+ true);
+ root->processed_tlist = lappend(root->processed_tlist, tle);
+ return;
+ }
+
+ /*
+ * Otherwise, rtindex should reference a leaf target relation that's being
+ * added to the query during expand_inherited_rtentry().
+ */
+ Assert(bms_is_member(rtindex, root->leaf_result_relids));
+ Assert(root->append_rel_array[rtindex] != NULL);
+
+ /*
+ * We have to find a matching RowIdentityVarInfo, or make one if there is
+ * none. To allow using equal() to match the vars, change the varno to
+ * ROWID_VAR, leaving all else alone.
+ */
+ rowid_var = copyObject(orig_var);
+ /* This could eventually become ChangeVarNodes() */
+ rowid_var->varno = ROWID_VAR;
+
+ /* Look for an existing row-id column of the same name */
+ foreach(lc, root->row_identity_vars)
+ {
+ ridinfo = (RowIdentityVarInfo *) lfirst(lc);
+ if (strcmp(rowid_name, ridinfo->rowidname) != 0)
+ continue;
+ if (equal(rowid_var, ridinfo->rowidvar))
+ {
+ /* Found a match; we need only record that rtindex needs it too */
+ ridinfo->rowidrels = bms_add_member(ridinfo->rowidrels, rtindex);
+ return;
+ }
+ else
+ {
+ /* Ooops, can't handle this */
+ elog(ERROR, "conflicting uses of row-identity name \"%s\"",
+ rowid_name);
+ }
+ }
+
+ /* No request yet, so add a new RowIdentityVarInfo */
+ ridinfo = makeNode(RowIdentityVarInfo);
+ ridinfo->rowidvar = copyObject(rowid_var);
+ /* for the moment, estimate width using just the datatype info */
+ ridinfo->rowidwidth = get_typavgwidth(exprType((Node *) rowid_var),
+ exprTypmod((Node *) rowid_var));
+ ridinfo->rowidname = pstrdup(rowid_name);
+ ridinfo->rowidrels = bms_make_singleton(rtindex);
+
+ root->row_identity_vars = lappend(root->row_identity_vars, ridinfo);
+
+ /* Change rowid_var into a reference to this row_identity_vars entry */
+ rowid_var->varattno = list_length(root->row_identity_vars);
+
+ /* Push the ROWID_VAR reference variable into processed_tlist */
+ tle = makeTargetEntry((Expr *) rowid_var,
+ list_length(root->processed_tlist) + 1,
+ pstrdup(rowid_name),
+ true);
+ root->processed_tlist = lappend(root->processed_tlist, tle);
+}
+
+/*
+ * add_row_identity_columns
+ *
+ * This function adds the row identity columns needed by the core code.
+ * FDWs might call add_row_identity_var() for themselves to add nonstandard
+ * columns. (Duplicate requests are fine.)
+ */
+void
+add_row_identity_columns(PlannerInfo *root, Index rtindex,
+ RangeTblEntry *target_rte,
+ Relation target_relation)
+{
+ CmdType commandType = root->parse->commandType;
+ char relkind = target_relation->rd_rel->relkind;
+ Var *var;
+
+ Assert(commandType == CMD_UPDATE || commandType == CMD_DELETE || commandType == CMD_MERGE);
+
+ if (commandType == CMD_MERGE ||
+ relkind == RELKIND_RELATION ||
+ relkind == RELKIND_MATVIEW ||
+ relkind == RELKIND_PARTITIONED_TABLE)
+ {
+ /*
+ * Emit CTID so that executor can find the row to merge, update or
+ * delete.
+ */
+ var = makeVar(rtindex,
+ SelfItemPointerAttributeNumber,
+ TIDOID,
+ -1,
+ InvalidOid,
+ 0);
+ add_row_identity_var(root, var, rtindex, "ctid");
+ }
+ else if (relkind == RELKIND_FOREIGN_TABLE)
+ {
+ /*
+ * Let the foreign table's FDW add whatever junk TLEs it wants.
+ */
+ FdwRoutine *fdwroutine;
+
+ fdwroutine = GetFdwRoutineForRelation(target_relation, false);
+
+ if (fdwroutine->AddForeignUpdateTargets != NULL)
+ fdwroutine->AddForeignUpdateTargets(root, rtindex,
+ target_rte, target_relation);
+
+ /*
+ * For UPDATE, we need to make the FDW fetch unchanged columns by
+ * asking it to fetch a whole-row Var. That's because the top-level
+ * targetlist only contains entries for changed columns, but
+ * ExecUpdate will need to build the complete new tuple. (Actually,
+ * we only really need this in UPDATEs that are not pushed to the
+ * remote side, but it's hard to tell if that will be the case at the
+ * point when this function is called.)
+ *
+ * We will also need the whole row if there are any row triggers, so
+ * that the executor will have the "old" row to pass to the trigger.
+ * Alas, this misses system columns.
+ */
+ if (commandType == CMD_UPDATE ||
+ (target_relation->trigdesc &&
+ (target_relation->trigdesc->trig_delete_after_row ||
+ target_relation->trigdesc->trig_delete_before_row)))
+ {
+ var = makeVar(rtindex,
+ InvalidAttrNumber,
+ RECORDOID,
+ -1,
+ InvalidOid,
+ 0);
+ add_row_identity_var(root, var, rtindex, "wholerow");
+ }
+ }
+}
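+
+/*
+ * Editor's note: the following is an illustrative sketch, not part of the
+ * PostgreSQL source.  An FDW's AddForeignUpdateTargets callback would
+ * typically register its own row identity column here, much as the core
+ * code does for the "ctid" case above, for example:
+ *
+ *		Var *var = makeVar(rtindex, SelfItemPointerAttributeNumber,
+ *						   TIDOID, -1, InvalidOid, 0);
+ *
+ *		add_row_identity_var(root, var, rtindex, "ctid");
+ *
+ * Duplicate requests for the same name/Var combination are simply merged,
+ * as add_row_identity_var's header comment notes.
+ */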
+
+/*
+ * distribute_row_identity_vars
+ *
+ * After we have finished identifying all the row identity columns
+ * needed by an inherited UPDATE/DELETE/MERGE query, make sure that
+ * these columns will be generated by all the target relations.
+ *
+ * This is more or less like what build_base_rel_tlists() does,
+ * except that it would not understand what to do with ROWID_VAR Vars.
+ * Since that function runs before inheritance relations are expanded,
+ * it will never see any such Vars anyway.
+ */
+void
+distribute_row_identity_vars(PlannerInfo *root)
+{
+ Query *parse = root->parse;
+ int result_relation = parse->resultRelation;
+ RangeTblEntry *target_rte;
+ RelOptInfo *target_rel;
+ ListCell *lc;
+
+ /*
+ * There's nothing to do if this isn't an inherited UPDATE/DELETE/MERGE.
+ */
+ if (parse->commandType != CMD_UPDATE && parse->commandType != CMD_DELETE &&
+ parse->commandType != CMD_MERGE)
+ {
+ Assert(root->row_identity_vars == NIL);
+ return;
+ }
+ target_rte = rt_fetch(result_relation, parse->rtable);
+ if (!target_rte->inh)
+ {
+ Assert(root->row_identity_vars == NIL);
+ return;
+ }
+
+ /*
+ * Ordinarily, we expect that leaf result relation(s) will have added some
+ * ROWID_VAR Vars to the query. However, it's possible that constraint
+ * exclusion suppressed every leaf relation. The executor will get upset
+ * if the plan has no row identity columns at all, even though it will
+ * certainly process no rows. Handle this edge case by re-opening the top
+ * result relation and adding the row identity columns it would have used,
+ * as preprocess_targetlist() would have done if it weren't marked "inh".
+ * Then re-run build_base_rel_tlists() to ensure that the added columns
+ * get propagated to the relation's reltarget. (This is a bit ugly, but
+ * it seems better to confine the ugliness and extra cycles to this
+ * unusual corner case.)
+ */
+ if (root->row_identity_vars == NIL)
+ {
+ Relation target_relation;
+
+ target_relation = table_open(target_rte->relid, NoLock);
+ add_row_identity_columns(root, result_relation,
+ target_rte, target_relation);
+ table_close(target_relation, NoLock);
+ build_base_rel_tlists(root, root->processed_tlist);
+ /* There are no ROWID_VAR Vars in this case, so we're done. */
+ return;
+ }
+
+ /*
+ * Dig through the processed_tlist to find the ROWID_VAR reference Vars,
+ * and forcibly copy them into the reltarget list of the topmost target
+ * relation. That's sufficient because they'll be copied to the
+ * individual leaf target rels (with appropriate translation) later,
+ * during appendrel expansion --- see set_append_rel_size().
+ */
+ target_rel = find_base_rel(root, result_relation);
+
+ foreach(lc, root->processed_tlist)
+ {
+ TargetEntry *tle = lfirst(lc);
+ Var *var = (Var *) tle->expr;
+
+ if (var && IsA(var, Var) && var->varno == ROWID_VAR)
+ {
+ target_rel->reltarget->exprs =
+ lappend(target_rel->reltarget->exprs, copyObject(var));
+ /* reltarget cost and width will be computed later */
+ }
+ }
+}
diff --git a/src/backend/optimizer/util/clauses.c b/src/backend/optimizer/util/clauses.c
new file mode 100644
index 0000000..f2216f5
--- /dev/null
+++ b/src/backend/optimizer/util/clauses.c
@@ -0,0 +1,5255 @@
+/*-------------------------------------------------------------------------
+ *
+ * clauses.c
+ * routines to manipulate qualification clauses
+ *
+ * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ * src/backend/optimizer/util/clauses.c
+ *
+ * HISTORY
+ * AUTHOR DATE MAJOR EVENT
+ * Andrew Yu Nov 3, 1994 clause.c and clauses.c combined
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include "access/htup_details.h"
+#include "catalog/pg_aggregate.h"
+#include "catalog/pg_class.h"
+#include "catalog/pg_language.h"
+#include "catalog/pg_operator.h"
+#include "catalog/pg_proc.h"
+#include "catalog/pg_type.h"
+#include "executor/executor.h"
+#include "executor/functions.h"
+#include "funcapi.h"
+#include "miscadmin.h"
+#include "nodes/makefuncs.h"
+#include "nodes/nodeFuncs.h"
+#include "nodes/subscripting.h"
+#include "nodes/supportnodes.h"
+#include "optimizer/clauses.h"
+#include "optimizer/cost.h"
+#include "optimizer/optimizer.h"
+#include "optimizer/plancat.h"
+#include "optimizer/planmain.h"
+#include "parser/analyze.h"
+#include "parser/parse_agg.h"
+#include "parser/parse_coerce.h"
+#include "parser/parse_func.h"
+#include "rewrite/rewriteHandler.h"
+#include "rewrite/rewriteManip.h"
+#include "tcop/tcopprot.h"
+#include "utils/acl.h"
+#include "utils/builtins.h"
+#include "utils/datum.h"
+#include "utils/fmgroids.h"
+#include "utils/lsyscache.h"
+#include "utils/memutils.h"
+#include "utils/syscache.h"
+#include "utils/typcache.h"
+
+typedef struct
+{
+ ParamListInfo boundParams;
+ PlannerInfo *root;
+ List *active_fns;
+ Node *case_val;
+ bool estimate;
+} eval_const_expressions_context;
+
+typedef struct
+{
+ int nargs;
+ List *args;
+ int *usecounts;
+} substitute_actual_parameters_context;
+
+typedef struct
+{
+ int nargs;
+ List *args;
+ int sublevels_up;
+} substitute_actual_srf_parameters_context;
+
+typedef struct
+{
+ char *proname;
+ char *prosrc;
+} inline_error_callback_arg;
+
+typedef struct
+{
+ char max_hazard; /* worst proparallel hazard found so far */
+ char max_interesting; /* worst proparallel hazard of interest */
+ List *safe_param_ids; /* PARAM_EXEC Param IDs to treat as safe */
+} max_parallel_hazard_context;
+
+static bool contain_agg_clause_walker(Node *node, void *context);
+static bool find_window_functions_walker(Node *node, WindowFuncLists *lists);
+static bool contain_subplans_walker(Node *node, void *context);
+static bool contain_mutable_functions_walker(Node *node, void *context);
+static bool contain_volatile_functions_walker(Node *node, void *context);
+static bool contain_volatile_functions_not_nextval_walker(Node *node, void *context);
+static bool max_parallel_hazard_walker(Node *node,
+ max_parallel_hazard_context *context);
+static bool contain_nonstrict_functions_walker(Node *node, void *context);
+static bool contain_exec_param_walker(Node *node, List *param_ids);
+static bool contain_context_dependent_node(Node *clause);
+static bool contain_context_dependent_node_walker(Node *node, int *flags);
+static bool contain_leaked_vars_walker(Node *node, void *context);
+static Relids find_nonnullable_rels_walker(Node *node, bool top_level);
+static List *find_nonnullable_vars_walker(Node *node, bool top_level);
+static bool is_strict_saop(ScalarArrayOpExpr *expr, bool falseOK);
+static bool convert_saop_to_hashed_saop_walker(Node *node, void *context);
+static Node *eval_const_expressions_mutator(Node *node,
+ eval_const_expressions_context *context);
+static bool contain_non_const_walker(Node *node, void *context);
+static bool ece_function_is_safe(Oid funcid,
+ eval_const_expressions_context *context);
+static List *simplify_or_arguments(List *args,
+ eval_const_expressions_context *context,
+ bool *haveNull, bool *forceTrue);
+static List *simplify_and_arguments(List *args,
+ eval_const_expressions_context *context,
+ bool *haveNull, bool *forceFalse);
+static Node *simplify_boolean_equality(Oid opno, List *args);
+static Expr *simplify_function(Oid funcid,
+ Oid result_type, int32 result_typmod,
+ Oid result_collid, Oid input_collid, List **args_p,
+ bool funcvariadic, bool process_args, bool allow_non_const,
+ eval_const_expressions_context *context);
+static List *reorder_function_arguments(List *args, int pronargs,
+ HeapTuple func_tuple);
+static List *add_function_defaults(List *args, int pronargs,
+ HeapTuple func_tuple);
+static List *fetch_function_defaults(HeapTuple func_tuple);
+static void recheck_cast_function_args(List *args, Oid result_type,
+ Oid *proargtypes, int pronargs,
+ HeapTuple func_tuple);
+static Expr *evaluate_function(Oid funcid, Oid result_type, int32 result_typmod,
+ Oid result_collid, Oid input_collid, List *args,
+ bool funcvariadic,
+ HeapTuple func_tuple,
+ eval_const_expressions_context *context);
+static Expr *inline_function(Oid funcid, Oid result_type, Oid result_collid,
+ Oid input_collid, List *args,
+ bool funcvariadic,
+ HeapTuple func_tuple,
+ eval_const_expressions_context *context);
+static Node *substitute_actual_parameters(Node *expr, int nargs, List *args,
+ int *usecounts);
+static Node *substitute_actual_parameters_mutator(Node *node,
+ substitute_actual_parameters_context *context);
+static void sql_inline_error_callback(void *arg);
+static Query *substitute_actual_srf_parameters(Query *expr,
+ int nargs, List *args);
+static Node *substitute_actual_srf_parameters_mutator(Node *node,
+ substitute_actual_srf_parameters_context *context);
+static bool pull_paramids_walker(Node *node, Bitmapset **context);
+
+
+/*****************************************************************************
+ * Aggregate-function clause manipulation
+ *****************************************************************************/
+
+/*
+ * contain_agg_clause
+ * Recursively search for Aggref/GroupingFunc nodes within a clause.
+ *
+ * Returns true if any aggregate found.
+ *
+ * This does not descend into subqueries, and so should be used only after
+ * reduction of sublinks to subplans, or in contexts where it's known there
+ * are no subqueries. There mustn't be outer-aggregate references either.
+ *
+ * (If you want something like this but able to deal with subqueries,
+ * see rewriteManip.c's contain_aggs_of_level().)
+ */
+bool
+contain_agg_clause(Node *clause)
+{
+ return contain_agg_clause_walker(clause, NULL);
+}
+
+static bool
+contain_agg_clause_walker(Node *node, void *context)
+{
+ if (node == NULL)
+ return false;
+ if (IsA(node, Aggref))
+ {
+ Assert(((Aggref *) node)->agglevelsup == 0);
+ return true; /* abort the tree traversal and return true */
+ }
+ if (IsA(node, GroupingFunc))
+ {
+ Assert(((GroupingFunc *) node)->agglevelsup == 0);
+ return true; /* abort the tree traversal and return true */
+ }
+ Assert(!IsA(node, SubLink));
+ return expression_tree_walker(node, contain_agg_clause_walker, context);
+}
+
+/*****************************************************************************
+ * Window-function clause manipulation
+ *****************************************************************************/
+
+/*
+ * contain_window_function
+ * Recursively search for WindowFunc nodes within a clause.
+ *
+ * Since window functions don't have level fields, but are hard-wired to
+ * be associated with the current query level, this is just the same as
+ * rewriteManip.c's function.
+ */
+bool
+contain_window_function(Node *clause)
+{
+ return contain_windowfuncs(clause);
+}
+
+/*
+ * find_window_functions
+ * Locate all the WindowFunc nodes in an expression tree, and organize
+ * them by winref ID number.
+ *
+ * Caller must provide an upper bound on the winref IDs expected in the tree.
+ */
+WindowFuncLists *
+find_window_functions(Node *clause, Index maxWinRef)
+{
+ WindowFuncLists *lists = palloc(sizeof(WindowFuncLists));
+
+ lists->numWindowFuncs = 0;
+ lists->maxWinRef = maxWinRef;
+ lists->windowFuncs = (List **) palloc0((maxWinRef + 1) * sizeof(List *));
+ (void) find_window_functions_walker(clause, lists);
+ return lists;
+}
+
+static bool
+find_window_functions_walker(Node *node, WindowFuncLists *lists)
+{
+ if (node == NULL)
+ return false;
+ if (IsA(node, WindowFunc))
+ {
+ WindowFunc *wfunc = (WindowFunc *) node;
+
+ /* winref is unsigned, so one-sided test is OK */
+ if (wfunc->winref > lists->maxWinRef)
+ elog(ERROR, "WindowFunc contains out-of-range winref %u",
+ wfunc->winref);
+ /* eliminate duplicates, so that we avoid repeated computation */
+ if (!list_member(lists->windowFuncs[wfunc->winref], wfunc))
+ {
+ lists->windowFuncs[wfunc->winref] =
+ lappend(lists->windowFuncs[wfunc->winref], wfunc);
+ lists->numWindowFuncs++;
+ }
+
+ /*
+ * We assume that the parser checked that there are no window
+ * functions in the arguments or filter clause. Hence, we need not
+ * recurse into them. (If either the parser or the planner screws up
+ * on this point, the executor will still catch it; see ExecInitExpr.)
+ */
+ return false;
+ }
+ Assert(!IsA(node, SubLink));
+ return expression_tree_walker(node, find_window_functions_walker,
+ (void *) lists);
+}
+
+
+/*****************************************************************************
+ * Support for expressions returning sets
+ *****************************************************************************/
+
+/*
+ * expression_returns_set_rows
+ * Estimate the number of rows returned by a set-returning expression.
+ * The result is 1 if it's not a set-returning expression.
+ *
+ * We should only examine the top-level function or operator; it used to be
+ * appropriate to recurse, but not anymore. (Even if there are more SRFs in
+ * the function's inputs, their multipliers are accounted for separately.)
+ *
+ * Note: keep this in sync with expression_returns_set() in nodes/nodeFuncs.c.
+ */
+double
+expression_returns_set_rows(PlannerInfo *root, Node *clause)
+{
+ if (clause == NULL)
+ return 1.0;
+ if (IsA(clause, FuncExpr))
+ {
+ FuncExpr *expr = (FuncExpr *) clause;
+
+ if (expr->funcretset)
+ return clamp_row_est(get_function_rows(root, expr->funcid, clause));
+ }
+ if (IsA(clause, OpExpr))
+ {
+ OpExpr *expr = (OpExpr *) clause;
+
+ if (expr->opretset)
+ {
+ set_opfuncid(expr);
+ return clamp_row_est(get_function_rows(root, expr->opfuncid, clause));
+ }
+ }
+ return 1.0;
+}
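+
+/*
+ * Editor's note: illustrative usage sketch, not part of the source.  A
+ * caller estimating output row counts might write
+ *
+ *		double		rows = expression_returns_set_rows(root, (Node *) expr);
+ *
+ * which yields 1.0 for any non-set-returning expression, and otherwise the
+ * rows estimate obtained via get_function_rows() for a top-level SRF call
+ * such as generate_series(...).
+ */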
+
+
+/*****************************************************************************
+ * Subplan clause manipulation
+ *****************************************************************************/
+
+/*
+ * contain_subplans
+ * Recursively search for subplan nodes within a clause.
+ *
+ * If we see a SubLink node, we will return true. This is only possible if
+ * the expression tree hasn't yet been transformed by subselect.c. We do not
+ * know whether the node will produce a true subplan or just an initplan,
+ * but we make the conservative assumption that it will be a subplan.
+ *
+ * Returns true if any subplan found.
+ */
+bool
+contain_subplans(Node *clause)
+{
+ return contain_subplans_walker(clause, NULL);
+}
+
+static bool
+contain_subplans_walker(Node *node, void *context)
+{
+ if (node == NULL)
+ return false;
+ if (IsA(node, SubPlan) ||
+ IsA(node, AlternativeSubPlan) ||
+ IsA(node, SubLink))
+ return true; /* abort the tree traversal and return true */
+ return expression_tree_walker(node, contain_subplans_walker, context);
+}
+
+
+/*****************************************************************************
+ * Check clauses for mutable functions
+ *****************************************************************************/
+
+/*
+ * contain_mutable_functions
+ * Recursively search for mutable functions within a clause.
+ *
+ * Returns true if any mutable function (or operator implemented by a
+ * mutable function) is found. This test is needed so that we don't
+ * mistakenly think that something like "WHERE random() < 0.5" can be treated
+ * as a constant qualification.
+ *
+ * We will recursively look into Query nodes (i.e., SubLink sub-selects)
+ * but not into SubPlans. See comments for contain_volatile_functions().
+ */
+bool
+contain_mutable_functions(Node *clause)
+{
+ return contain_mutable_functions_walker(clause, NULL);
+}
+
+static bool
+contain_mutable_functions_checker(Oid func_id, void *context)
+{
+ return (func_volatile(func_id) != PROVOLATILE_IMMUTABLE);
+}
+
+static bool
+contain_mutable_functions_walker(Node *node, void *context)
+{
+ if (node == NULL)
+ return false;
+ /* Check for mutable functions in node itself */
+ if (check_functions_in_node(node, contain_mutable_functions_checker,
+ context))
+ return true;
+
+ if (IsA(node, SQLValueFunction))
+ {
+ /* all variants of SQLValueFunction are stable */
+ return true;
+ }
+
+ if (IsA(node, NextValueExpr))
+ {
+ /* NextValueExpr is volatile */
+ return true;
+ }
+
+ /*
+ * It should be safe to treat MinMaxExpr as immutable, because it will
+ * depend on a non-cross-type btree comparison function, and those should
+ * always be immutable. Treating XmlExpr as immutable is more dubious,
+ * and treating CoerceToDomain as immutable is outright dangerous. But we
+ * have done so historically, and changing this would probably cause more
+ * problems than it would fix. In practice, if you have a non-immutable
+ * domain constraint you are in for pain anyhow.
+ */
+
+ /* Recurse to check arguments */
+ if (IsA(node, Query))
+ {
+ /* Recurse into subselects */
+ return query_tree_walker((Query *) node,
+ contain_mutable_functions_walker,
+ context, 0);
+ }
+ return expression_tree_walker(node, contain_mutable_functions_walker,
+ context);
+}
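+
+/*
+ * Editor's note: illustrative examples, not part of the source.  A clause
+ * such as "x + 1", built only from immutable operators, makes
+ * contain_mutable_functions() return false; a clause mentioning now()
+ * (stable) or random() (volatile) makes it return true, since anything
+ * less than immutable counts as mutable here.
+ */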
+
+
+/*****************************************************************************
+ * Check clauses for volatile functions
+ *****************************************************************************/
+
+/*
+ * contain_volatile_functions
+ * Recursively search for volatile functions within a clause.
+ *
+ * Returns true if any volatile function (or operator implemented by a
+ * volatile function) is found. This test prevents, for example,
+ * invalid conversions of volatile expressions into indexscan quals.
+ *
+ * We will recursively look into Query nodes (i.e., SubLink sub-selects)
+ * but not into SubPlans. This is a bit odd, but intentional. If we are
+ * looking at a SubLink, we are probably deciding whether a query tree
+ * transformation is safe, and a contained sub-select should affect that;
+ * for example, duplicating a sub-select containing a volatile function
+ * would be bad. However, once we've got to the stage of having SubPlans,
+ * subsequent planning need not consider volatility within those, since
+ * the executor won't change its evaluation rules for a SubPlan based on
+ * volatility.
+ *
+ * For some node types, for example, RestrictInfo and PathTarget, we cache
+ * whether we found any volatile functions or not and reuse that value in any
+ * future checks for that node. All of the logic for determining if the
+ * cached value should be set to VOLATILITY_NOVOLATILE or VOLATILITY_VOLATILE
+ * belongs in this function. Any code which makes changes to these nodes
+ * which could change the outcome of this function must set the cached value
+ * back to VOLATILITY_UNKNOWN.  That allows this function to redetermine the
+ * correct value the next time it is asked whether the node contains any
+ * volatile functions.
+ */
+bool
+contain_volatile_functions(Node *clause)
+{
+ return contain_volatile_functions_walker(clause, NULL);
+}
+
+static bool
+contain_volatile_functions_checker(Oid func_id, void *context)
+{
+ return (func_volatile(func_id) == PROVOLATILE_VOLATILE);
+}
+
+static bool
+contain_volatile_functions_walker(Node *node, void *context)
+{
+ if (node == NULL)
+ return false;
+ /* Check for volatile functions in node itself */
+ if (check_functions_in_node(node, contain_volatile_functions_checker,
+ context))
+ return true;
+
+ if (IsA(node, NextValueExpr))
+ {
+ /* NextValueExpr is volatile */
+ return true;
+ }
+
+ if (IsA(node, RestrictInfo))
+ {
+ RestrictInfo *rinfo = (RestrictInfo *) node;
+
+ /*
+ * For RestrictInfo, check if we've checked the volatility of it
+ * before. If so, we can just use the cached value and not bother
+	 * checking it again.  Otherwise, check it and cache whether we
+ * found any volatile functions.
+ */
+ if (rinfo->has_volatile == VOLATILITY_NOVOLATILE)
+ return false;
+ else if (rinfo->has_volatile == VOLATILITY_VOLATILE)
+ return true;
+ else
+ {
+ bool hasvolatile;
+
+ hasvolatile = contain_volatile_functions_walker((Node *) rinfo->clause,
+ context);
+ if (hasvolatile)
+ rinfo->has_volatile = VOLATILITY_VOLATILE;
+ else
+ rinfo->has_volatile = VOLATILITY_NOVOLATILE;
+
+ return hasvolatile;
+ }
+ }
+
+ if (IsA(node, PathTarget))
+ {
+ PathTarget *target = (PathTarget *) node;
+
+ /*
+ * We also do caching for PathTarget the same as we do above for
+ * RestrictInfos.
+ */
+ if (target->has_volatile_expr == VOLATILITY_NOVOLATILE)
+ return false;
+ else if (target->has_volatile_expr == VOLATILITY_VOLATILE)
+ return true;
+ else
+ {
+ bool hasvolatile;
+
+ hasvolatile = contain_volatile_functions_walker((Node *) target->exprs,
+ context);
+
+ if (hasvolatile)
+ target->has_volatile_expr = VOLATILITY_VOLATILE;
+ else
+ target->has_volatile_expr = VOLATILITY_NOVOLATILE;
+
+ return hasvolatile;
+ }
+ }
+
+ /*
+ * See notes in contain_mutable_functions_walker about why we treat
+ * MinMaxExpr, XmlExpr, and CoerceToDomain as immutable, while
+ * SQLValueFunction is stable. Hence, none of them are of interest here.
+ */
+
+ /* Recurse to check arguments */
+ if (IsA(node, Query))
+ {
+ /* Recurse into subselects */
+ return query_tree_walker((Query *) node,
+ contain_volatile_functions_walker,
+ context, 0);
+ }
+ return expression_tree_walker(node, contain_volatile_functions_walker,
+ context);
+}
+
+/*
+ * Special purpose version of contain_volatile_functions() for use in COPY:
+ * ignore nextval(), but treat all other functions normally.
+ */
+bool
+contain_volatile_functions_not_nextval(Node *clause)
+{
+ return contain_volatile_functions_not_nextval_walker(clause, NULL);
+}
+
+static bool
+contain_volatile_functions_not_nextval_checker(Oid func_id, void *context)
+{
+ return (func_id != F_NEXTVAL &&
+ func_volatile(func_id) == PROVOLATILE_VOLATILE);
+}
+
+static bool
+contain_volatile_functions_not_nextval_walker(Node *node, void *context)
+{
+ if (node == NULL)
+ return false;
+ /* Check for volatile functions in node itself */
+ if (check_functions_in_node(node,
+ contain_volatile_functions_not_nextval_checker,
+ context))
+ return true;
+
+ /*
+ * See notes in contain_mutable_functions_walker about why we treat
+ * MinMaxExpr, XmlExpr, and CoerceToDomain as immutable, while
+ * SQLValueFunction is stable. Hence, none of them are of interest here.
+ * Also, since we're intentionally ignoring nextval(), presumably we
+ * should ignore NextValueExpr.
+ */
+
+ /* Recurse to check arguments */
+ if (IsA(node, Query))
+ {
+ /* Recurse into subselects */
+ return query_tree_walker((Query *) node,
+ contain_volatile_functions_not_nextval_walker,
+ context, 0);
+ }
+ return expression_tree_walker(node,
+ contain_volatile_functions_not_nextval_walker,
+ context);
+}
+
+
+/*****************************************************************************
+ * Check queries for parallel unsafe and/or restricted constructs
+ *****************************************************************************/
+
+/*
+ * max_parallel_hazard
+ * Find the worst parallel-hazard level in the given query
+ *
+ * Returns the worst function hazard property (the earliest in this list:
+ * PROPARALLEL_UNSAFE, PROPARALLEL_RESTRICTED, PROPARALLEL_SAFE) that can
+ * be found in the given parsetree. We use this to find out whether the query
+ * can be parallelized at all. The caller will also save the result in
+ * PlannerGlobal so as to short-circuit checks of portions of the querytree
+ * later, in the common case where everything is SAFE.
+ */
+char
+max_parallel_hazard(Query *parse)
+{
+ max_parallel_hazard_context context;
+
+ context.max_hazard = PROPARALLEL_SAFE;
+ context.max_interesting = PROPARALLEL_UNSAFE;
+ context.safe_param_ids = NIL;
+ (void) max_parallel_hazard_walker((Node *) parse, &context);
+ return context.max_hazard;
+}
+
+/*
+ * is_parallel_safe
+ * Detect whether the given expr contains only parallel-safe functions
+ *
+ * root->glob->maxParallelHazard must previously have been set to the
+ * result of max_parallel_hazard() on the whole query.
+ */
+bool
+is_parallel_safe(PlannerInfo *root, Node *node)
+{
+ max_parallel_hazard_context context;
+ PlannerInfo *proot;
+ ListCell *l;
+
+ /*
+ * Even if the original querytree contained nothing unsafe, we need to
+ * search the expression if we have generated any PARAM_EXEC Params while
+ * planning, because those are parallel-restricted and there might be one
+ * in this expression. But otherwise we don't need to look.
+ */
+ if (root->glob->maxParallelHazard == PROPARALLEL_SAFE &&
+ root->glob->paramExecTypes == NIL)
+ return true;
+ /* Else use max_parallel_hazard's search logic, but stop on RESTRICTED */
+ context.max_hazard = PROPARALLEL_SAFE;
+ context.max_interesting = PROPARALLEL_RESTRICTED;
+ context.safe_param_ids = NIL;
+
+ /*
+ * The params that refer to the same or parent query level are considered
+ * parallel-safe.  The idea is that we compute such params at the Gather or
+ * Gather Merge node and pass their values to workers.
+ */
+ for (proot = root; proot != NULL; proot = proot->parent_root)
+ {
+ foreach(l, proot->init_plans)
+ {
+ SubPlan *initsubplan = (SubPlan *) lfirst(l);
+
+ context.safe_param_ids = list_concat(context.safe_param_ids,
+ initsubplan->setParam);
+ }
+ }
+
+ return !max_parallel_hazard_walker(node, &context);
+}
+
+/* core logic for all parallel-hazard checks */
+static bool
+max_parallel_hazard_test(char proparallel, max_parallel_hazard_context *context)
+{
+ switch (proparallel)
+ {
+ case PROPARALLEL_SAFE:
+ /* nothing to see here, move along */
+ break;
+ case PROPARALLEL_RESTRICTED:
+ /* increase max_hazard to RESTRICTED */
+ Assert(context->max_hazard != PROPARALLEL_UNSAFE);
+ context->max_hazard = proparallel;
+ /* done if we are not expecting any unsafe functions */
+ if (context->max_interesting == proparallel)
+ return true;
+ break;
+ case PROPARALLEL_UNSAFE:
+ context->max_hazard = proparallel;
+ /* we're always done at the first unsafe construct */
+ return true;
+ default:
+ elog(ERROR, "unrecognized proparallel value \"%c\"", proparallel);
+ break;
+ }
+ return false;
+}
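+
+/*
+ * Editor's note: illustrative, not part of the source.  The hazard levels
+ * form a severity order SAFE < RESTRICTED < UNSAFE.  With max_interesting
+ * set to PROPARALLEL_RESTRICTED (as in is_parallel_safe above), the first
+ * RESTRICTED construct already returns true and ends the walk; with
+ * max_interesting set to PROPARALLEL_UNSAFE (as in max_parallel_hazard),
+ * the walk records RESTRICTED constructs but stops only at an UNSAFE one.
+ */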
+
+/* check_functions_in_node callback */
+static bool
+max_parallel_hazard_checker(Oid func_id, void *context)
+{
+ return max_parallel_hazard_test(func_parallel(func_id),
+ (max_parallel_hazard_context *) context);
+}
+
+static bool
+max_parallel_hazard_walker(Node *node, max_parallel_hazard_context *context)
+{
+ if (node == NULL)
+ return false;
+
+ /* Check for hazardous functions in node itself */
+ if (check_functions_in_node(node, max_parallel_hazard_checker,
+ context))
+ return true;
+
+ /*
+ * It should be OK to treat MinMaxExpr as parallel-safe, since btree
+ * opclass support functions are generally parallel-safe. XmlExpr is a
+ * bit more dubious but we can probably get away with it. We err on the
+ * side of caution by treating CoerceToDomain as parallel-restricted.
+ * (Note: in principle that's wrong because a domain constraint could
+ * contain a parallel-unsafe function; but useful constraints probably
+ * never would have such, and assuming they do would cripple use of
+ * parallel query in the presence of domain types.) SQLValueFunction
+ * should be safe in all cases. NextValueExpr is parallel-unsafe.
+ */
+ if (IsA(node, CoerceToDomain))
+ {
+ if (max_parallel_hazard_test(PROPARALLEL_RESTRICTED, context))
+ return true;
+ }
+
+ else if (IsA(node, NextValueExpr))
+ {
+ if (max_parallel_hazard_test(PROPARALLEL_UNSAFE, context))
+ return true;
+ }
+
+ /*
+ * Treat window functions as parallel-restricted because we aren't sure
+ * whether the input row ordering is fully deterministic, and the output
+ * of window functions might vary across workers if not. (In some cases,
+ * like where the window frame orders by a primary key, we could relax
+ * this restriction. But it doesn't currently seem worth expending extra
+ * effort to do so.)
+ */
+ else if (IsA(node, WindowFunc))
+ {
+ if (max_parallel_hazard_test(PROPARALLEL_RESTRICTED, context))
+ return true;
+ }
+
+ /*
+ * As a notational convenience for callers, look through RestrictInfo.
+ */
+ else if (IsA(node, RestrictInfo))
+ {
+ RestrictInfo *rinfo = (RestrictInfo *) node;
+
+ return max_parallel_hazard_walker((Node *) rinfo->clause, context);
+ }
+
+ /*
+ * Really we should not see SubLink during a max_interesting == restricted
+ * scan, but if we do, return true.
+ */
+ else if (IsA(node, SubLink))
+ {
+ if (max_parallel_hazard_test(PROPARALLEL_RESTRICTED, context))
+ return true;
+ }
+
+ /*
+ * Only parallel-safe SubPlans can be sent to workers. Within the
+ * testexpr of the SubPlan, Params representing the output columns of the
+ * subplan can be treated as parallel-safe, so temporarily add their IDs
+ * to the safe_param_ids list while examining the testexpr.
+ */
+ else if (IsA(node, SubPlan))
+ {
+ SubPlan *subplan = (SubPlan *) node;
+ List *save_safe_param_ids;
+
+ if (!subplan->parallel_safe &&
+ max_parallel_hazard_test(PROPARALLEL_RESTRICTED, context))
+ return true;
+ save_safe_param_ids = context->safe_param_ids;
+ context->safe_param_ids = list_concat_copy(context->safe_param_ids,
+ subplan->paramIds);
+ if (max_parallel_hazard_walker(subplan->testexpr, context))
+ return true; /* no need to restore safe_param_ids */
+ list_free(context->safe_param_ids);
+ context->safe_param_ids = save_safe_param_ids;
+ /* we must also check args, but no special Param treatment there */
+ if (max_parallel_hazard_walker((Node *) subplan->args, context))
+ return true;
+ /* don't want to recurse normally, so we're done */
+ return false;
+ }
+
+ /*
+ * We can't pass Params to workers at the moment either, so they are also
+ * parallel-restricted, unless they are PARAM_EXTERN Params or are
+ * PARAM_EXEC Params listed in safe_param_ids, meaning they are either
+ * generated within workers or computed by the leader, in which case their
+ * values can be passed down to workers.
+ */
+ else if (IsA(node, Param))
+ {
+ Param *param = (Param *) node;
+
+ if (param->paramkind == PARAM_EXTERN)
+ return false;
+
+ if (param->paramkind != PARAM_EXEC ||
+ !list_member_int(context->safe_param_ids, param->paramid))
+ {
+ if (max_parallel_hazard_test(PROPARALLEL_RESTRICTED, context))
+ return true;
+ }
+ return false; /* nothing to recurse to */
+ }
+
+ /*
+ * When we're first invoked on a completely unplanned tree, we must
+ * recurse into subqueries so as to locate parallel-unsafe constructs
+ * anywhere in the tree.
+ */
+ else if (IsA(node, Query))
+ {
+ Query *query = (Query *) node;
+
+ /* SELECT FOR UPDATE/SHARE must be treated as unsafe */
+ if (query->rowMarks != NULL)
+ {
+ context->max_hazard = PROPARALLEL_UNSAFE;
+ return true;
+ }
+
+ /* Recurse into subselects */
+ return query_tree_walker(query,
+ max_parallel_hazard_walker,
+ context, 0);
+ }
+
+ /* Recurse to check arguments */
+ return expression_tree_walker(node,
+ max_parallel_hazard_walker,
+ context);
+}
+
+
+/*****************************************************************************
+ * Check clauses for nonstrict functions
+ *****************************************************************************/
+
+/*
+ * contain_nonstrict_functions
+ * Recursively search for nonstrict functions within a clause.
+ *
+ * Returns true if any nonstrict construct is found --- ie, anything that
+ * could produce non-NULL output with a NULL input.
+ *
+ * The idea here is that the caller has verified that the expression contains
+ * one or more Var or Param nodes (as appropriate for the caller's need), and
+ * now wishes to prove that the expression result will be NULL if any of these
+ * inputs is NULL. If we return false, then the proof succeeded.
+ */
+bool
+contain_nonstrict_functions(Node *clause)
+{
+ return contain_nonstrict_functions_walker(clause, NULL);
+}
+
+static bool
+contain_nonstrict_functions_checker(Oid func_id, void *context)
+{
+ return !func_strict(func_id);
+}
+
+static bool
+contain_nonstrict_functions_walker(Node *node, void *context)
+{
+ if (node == NULL)
+ return false;
+ if (IsA(node, Aggref))
+ {
+ /* an aggregate could return non-null with null input */
+ return true;
+ }
+ if (IsA(node, GroupingFunc))
+ {
+ /*
+ * A GroupingFunc doesn't evaluate its arguments, and therefore must
+ * be treated as nonstrict.
+ */
+ return true;
+ }
+ if (IsA(node, WindowFunc))
+ {
+ /* a window function could return non-null with null input */
+ return true;
+ }
+ if (IsA(node, SubscriptingRef))
+ {
+ SubscriptingRef *sbsref = (SubscriptingRef *) node;
+ const SubscriptRoutines *sbsroutines;
+
+ /* Subscripting assignment is always presumed nonstrict */
+ if (sbsref->refassgnexpr != NULL)
+ return true;
+ /* Otherwise we must look up the subscripting support methods */
+ sbsroutines = getSubscriptingRoutines(sbsref->refcontainertype, NULL);
+ if (!(sbsroutines && sbsroutines->fetch_strict))
+ return true;
+ /* else fall through to check args */
+ }
+ if (IsA(node, DistinctExpr))
+ {
+ /* IS DISTINCT FROM is inherently non-strict */
+ return true;
+ }
+ if (IsA(node, NullIfExpr))
+ {
+ /* NULLIF is inherently non-strict */
+ return true;
+ }
+ if (IsA(node, BoolExpr))
+ {
+ BoolExpr *expr = (BoolExpr *) node;
+
+ switch (expr->boolop)
+ {
+ case AND_EXPR:
+ case OR_EXPR:
+ /* AND, OR are inherently non-strict */
+ return true;
+ default:
+ break;
+ }
+ }
+ if (IsA(node, SubLink))
+ {
+ /* In some cases a sublink might be strict, but in general not */
+ return true;
+ }
+ if (IsA(node, SubPlan))
+ return true;
+ if (IsA(node, AlternativeSubPlan))
+ return true;
+ if (IsA(node, FieldStore))
+ return true;
+ if (IsA(node, CoerceViaIO))
+ {
+ /*
+ * CoerceViaIO is strict regardless of whether the I/O functions are,
+ * so just go look at its argument; asking check_functions_in_node is
+ * useless expense and could deliver the wrong answer.
+ */
+ return contain_nonstrict_functions_walker((Node *) ((CoerceViaIO *) node)->arg,
+ context);
+ }
+ if (IsA(node, ArrayCoerceExpr))
+ {
+ /*
+ * ArrayCoerceExpr is strict at the array level, regardless of what
+ * the per-element expression is; so we should ignore elemexpr and
+ * recurse only into the arg.
+ */
+ return contain_nonstrict_functions_walker((Node *) ((ArrayCoerceExpr *) node)->arg,
+ context);
+ }
+ if (IsA(node, CaseExpr))
+ return true;
+ if (IsA(node, ArrayExpr))
+ return true;
+ if (IsA(node, RowExpr))
+ return true;
+ if (IsA(node, RowCompareExpr))
+ return true;
+ if (IsA(node, CoalesceExpr))
+ return true;
+ if (IsA(node, MinMaxExpr))
+ return true;
+ if (IsA(node, XmlExpr))
+ return true;
+ if (IsA(node, NullTest))
+ return true;
+ if (IsA(node, BooleanTest))
+ return true;
+
+ /* Check other function-containing nodes */
+ if (check_functions_in_node(node, contain_nonstrict_functions_checker,
+ context))
+ return true;
+
+ return expression_tree_walker(node, contain_nonstrict_functions_walker,
+ context);
+}
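+
+/*
+ * Editor's note: illustrative examples, not part of the source.  For a
+ * clause like "x + 1", built only from strict operators, this returns
+ * false, so a NULL x provably yields a NULL result; for "COALESCE(x, 0)"
+ * it returns true, since a CoalesceExpr can produce a non-NULL result
+ * from a NULL input.
+ */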
+
+/*****************************************************************************
+ * Check clauses for Params
+ *****************************************************************************/
+
+/*
+ * contain_exec_param
+ * Recursively search for PARAM_EXEC Params within a clause.
+ *
+ * Returns true if the clause contains any PARAM_EXEC Param with a paramid
+ * appearing in the given list of Param IDs. Does not descend into
+ * subqueries!
+ */
+bool
+contain_exec_param(Node *clause, List *param_ids)
+{
+ return contain_exec_param_walker(clause, param_ids);
+}
+
+static bool
+contain_exec_param_walker(Node *node, List *param_ids)
+{
+ if (node == NULL)
+ return false;
+ if (IsA(node, Param))
+ {
+ Param *p = (Param *) node;
+
+ if (p->paramkind == PARAM_EXEC &&
+ list_member_int(param_ids, p->paramid))
+ return true;
+ }
+ return expression_tree_walker(node, contain_exec_param_walker, param_ids);
+}
+
+/*****************************************************************************
+ * Check clauses for context-dependent nodes
+ *****************************************************************************/
+
+/*
+ * contain_context_dependent_node
+ * Recursively search for context-dependent nodes within a clause.
+ *
+ * CaseTestExpr nodes must appear directly within the corresponding CaseExpr,
+ * not nested within another one, or they'll see the wrong test value. If one
+ * appears "bare" in the arguments of a SQL function, then we can't inline the
+ * SQL function for fear of creating such a situation. The same applies for
+ * CaseTestExpr used within the elemexpr of an ArrayCoerceExpr.
+ *
+ * CoerceToDomainValue would have the same issue if domain CHECK expressions
+ * could get inlined into larger expressions, but presently that's impossible.
+ * Still, it might be allowed in future, or other node types with similar
+ * issues might get invented. So give this function a generic name, and set
+ * up the recursion state to allow multiple flag bits.
+ */
+static bool
+contain_context_dependent_node(Node *clause)
+{
+ int flags = 0;
+
+ return contain_context_dependent_node_walker(clause, &flags);
+}
+
+#define CCDN_CASETESTEXPR_OK 0x0001 /* CaseTestExpr okay here? */
+
+static bool
+contain_context_dependent_node_walker(Node *node, int *flags)
+{
+ if (node == NULL)
+ return false;
+ if (IsA(node, CaseTestExpr))
+ return !(*flags & CCDN_CASETESTEXPR_OK);
+ else if (IsA(node, CaseExpr))
+ {
+ CaseExpr *caseexpr = (CaseExpr *) node;
+
+ /*
+ * If this CASE doesn't have a test expression, then it doesn't create
+ * a context in which CaseTestExprs should appear, so just fall
+ * through and treat it as a generic expression node.
+ */
+ if (caseexpr->arg)
+ {
+ int save_flags = *flags;
+ bool res;
+
+ /*
+ * Note: in principle, we could distinguish the various sub-parts
+ * of a CASE construct and set the flag bit only for some of them,
+ * since we are only expecting CaseTestExprs to appear in the
+ * "expr" subtree of the CaseWhen nodes. But it doesn't really
+ * seem worth any extra code. If there are any bare CaseTestExprs
+ * elsewhere in the CASE, something's wrong already.
+ */
+ *flags |= CCDN_CASETESTEXPR_OK;
+ res = expression_tree_walker(node,
+ contain_context_dependent_node_walker,
+ (void *) flags);
+ *flags = save_flags;
+ return res;
+ }
+ }
+ else if (IsA(node, ArrayCoerceExpr))
+ {
+ ArrayCoerceExpr *ac = (ArrayCoerceExpr *) node;
+ int save_flags;
+ bool res;
+
+ /* Check the array expression */
+ if (contain_context_dependent_node_walker((Node *) ac->arg, flags))
+ return true;
+
+ /* Check the elemexpr, which is allowed to contain CaseTestExpr */
+ save_flags = *flags;
+ *flags |= CCDN_CASETESTEXPR_OK;
+ res = contain_context_dependent_node_walker((Node *) ac->elemexpr,
+ flags);
+ *flags = save_flags;
+ return res;
+ }
+ return expression_tree_walker(node, contain_context_dependent_node_walker,
+ (void *) flags);
+}
+
+/*****************************************************************************
+ * Check clauses for Vars passed to non-leakproof functions
+ *****************************************************************************/
+
+/*
+ * contain_leaked_vars
+ * Recursively scan a clause to discover whether it contains any Var
+ * nodes (of the current query level) that are passed as arguments to
+ * leaky functions.
+ *
+ * Returns true if the clause contains any non-leakproof functions that are
+ * passed Var nodes of the current query level, and which might therefore leak
+ * data. Such clauses must be applied after any lower-level security barrier
+ * clauses.
+ */
+bool
+contain_leaked_vars(Node *clause)
+{
+ return contain_leaked_vars_walker(clause, NULL);
+}
+
+static bool
+contain_leaked_vars_checker(Oid func_id, void *context)
+{
+ return !get_func_leakproof(func_id);
+}
+
+static bool
+contain_leaked_vars_walker(Node *node, void *context)
+{
+ if (node == NULL)
+ return false;
+
+ switch (nodeTag(node))
+ {
+ case T_Var:
+ case T_Const:
+ case T_Param:
+ case T_ArrayExpr:
+ case T_FieldSelect:
+ case T_FieldStore:
+ case T_NamedArgExpr:
+ case T_BoolExpr:
+ case T_RelabelType:
+ case T_CollateExpr:
+ case T_CaseExpr:
+ case T_CaseTestExpr:
+ case T_RowExpr:
+ case T_SQLValueFunction:
+ case T_NullTest:
+ case T_BooleanTest:
+ case T_NextValueExpr:
+ case T_List:
+
+ /*
+ * We know these node types don't contain function calls; but
+ * something further down in the node tree might.
+ */
+ break;
+
+ case T_FuncExpr:
+ case T_OpExpr:
+ case T_DistinctExpr:
+ case T_NullIfExpr:
+ case T_ScalarArrayOpExpr:
+ case T_CoerceViaIO:
+ case T_ArrayCoerceExpr:
+
+ /*
+ * If node contains a leaky function call, and there's any Var
+ * underneath it, reject.
+ */
+ if (check_functions_in_node(node, contain_leaked_vars_checker,
+ context) &&
+ contain_var_clause(node))
+ return true;
+ break;
+
+ case T_SubscriptingRef:
+ {
+ SubscriptingRef *sbsref = (SubscriptingRef *) node;
+ const SubscriptRoutines *sbsroutines;
+
+ /* Consult the subscripting support method info */
+ sbsroutines = getSubscriptingRoutines(sbsref->refcontainertype,
+ NULL);
+ if (!sbsroutines ||
+ !(sbsref->refassgnexpr != NULL ?
+ sbsroutines->store_leakproof :
+ sbsroutines->fetch_leakproof))
+ {
+ /* Node is leaky, so reject if it contains Vars */
+ if (contain_var_clause(node))
+ return true;
+ }
+ }
+ break;
+
+ case T_RowCompareExpr:
+ {
+ /*
+ * It's worth special-casing this because a leaky comparison
+ * function only compromises one pair of row elements, which
+ * might not contain Vars while others do.
+ */
+ RowCompareExpr *rcexpr = (RowCompareExpr *) node;
+ ListCell *opid;
+ ListCell *larg;
+ ListCell *rarg;
+
+ forthree(opid, rcexpr->opnos,
+ larg, rcexpr->largs,
+ rarg, rcexpr->rargs)
+ {
+ Oid funcid = get_opcode(lfirst_oid(opid));
+
+ if (!get_func_leakproof(funcid) &&
+ (contain_var_clause((Node *) lfirst(larg)) ||
+ contain_var_clause((Node *) lfirst(rarg))))
+ return true;
+ }
+ }
+ break;
+
+ case T_MinMaxExpr:
+ {
+ /*
+ * MinMaxExpr is leakproof if the comparison function it calls
+ * is leakproof.
+ */
+ MinMaxExpr *minmaxexpr = (MinMaxExpr *) node;
+ TypeCacheEntry *typentry;
+ bool leakproof;
+
+ /* Look up the btree comparison function for the datatype */
+ typentry = lookup_type_cache(minmaxexpr->minmaxtype,
+ TYPECACHE_CMP_PROC);
+ if (OidIsValid(typentry->cmp_proc))
+ leakproof = get_func_leakproof(typentry->cmp_proc);
+ else
+ {
+ /*
+ * The executor will throw an error, but here we just
+ * treat the missing function as leaky.
+ */
+ leakproof = false;
+ }
+
+ if (!leakproof &&
+ contain_var_clause((Node *) minmaxexpr->args))
+ return true;
+ }
+ break;
+
+ case T_CurrentOfExpr:
+
+ /*
+ * WHERE CURRENT OF doesn't contain leaky function calls.
+ * Moreover, it is essential that this is considered non-leaky,
+ * since the planner must always generate a TID scan when CURRENT
+ * OF is present -- cf. cost_tidscan.
+ */
+ return false;
+
+ default:
+
+ /*
+ * If we don't recognize the node tag, assume it might be leaky.
+ * This prevents an unexpected security hole if someone adds a new
+ * node type that can call a function.
+ */
+ return true;
+ }
+ return expression_tree_walker(node, contain_leaked_vars_walker,
+ context);
+}
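+
+/*
+ * Editor's note: illustrative example, not part of the source.  In a
+ * security-barrier view over table t, a qual such as "t.secret LIKE 'x%'"
+ * is reported as leaky, because the pattern-matching operator's function
+ * is not marked leakproof and it receives a Var; such a qual must be
+ * evaluated after the view's own quals.  A qual built only from leakproof
+ * operators, such as plain equality on built-in types, passes the test.
+ */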
+
+/*
+ * find_nonnullable_rels
+ * Determine which base rels are forced nonnullable by given clause.
+ *
+ * Returns the set of all Relids that are referenced in the clause in such
+ * a way that the clause cannot possibly return TRUE if any of these Relids
+ * is an all-NULL row. (It is OK to err on the side of conservatism; hence
+ * the analysis here is simplistic.)
+ *
+ * The semantics here are subtly different from contain_nonstrict_functions:
+ * that function is concerned with NULL results from arbitrary expressions,
+ * but here we assume that the input is a Boolean expression, and wish to
+ * see if NULL inputs will provably cause a FALSE-or-NULL result. We expect
+ * the expression to have been AND/OR flattened and converted to implicit-AND
+ * format.
+ *
+ * Note: this function is largely duplicative of find_nonnullable_vars().
+ * The reason not to simplify this function into a thin wrapper around
+ * find_nonnullable_vars() is that the tested conditions really are different:
+ * a clause like "t1.v1 IS NOT NULL OR t1.v2 IS NOT NULL" does not prove
+ * that either v1 or v2 can't be NULL, but it does prove that the t1 row
+ * as a whole can't be all-NULL. Also, the behavior for PHVs is different.
+ *
+ * top_level is true while scanning top-level AND/OR structure; here, showing
+ * the result is either FALSE or NULL is good enough. top_level is false when
+ * we have descended below a NOT or a strict function: now we must be able to
+ * prove that the subexpression goes to NULL.
+ *
+ * We don't use expression_tree_walker here because we don't want to descend
+ * through very many kinds of nodes; only the ones we can be sure are strict.
+ */
+Relids
+find_nonnullable_rels(Node *clause)
+{
+ return find_nonnullable_rels_walker(clause, true);
+}
+
+static Relids
+find_nonnullable_rels_walker(Node *node, bool top_level)
+{
+ Relids result = NULL;
+ ListCell *l;
+
+ if (node == NULL)
+ return NULL;
+ if (IsA(node, Var))
+ {
+ Var *var = (Var *) node;
+
+ if (var->varlevelsup == 0)
+ result = bms_make_singleton(var->varno);
+ }
+ else if (IsA(node, List))
+ {
+ /*
+ * At top level, we are examining an implicit-AND list: if any of the
+ * arms produces FALSE-or-NULL then the result is FALSE-or-NULL. If
+ * not at top level, we are examining the arguments of a strict
+ * function: if any of them produce NULL then the result of the
+ * function must be NULL. So in both cases, the set of nonnullable
+ * rels is the union of those found in the arms, and we pass down the
+ * top_level flag unmodified.
+ */
+ foreach(l, (List *) node)
+ {
+ result = bms_join(result,
+ find_nonnullable_rels_walker(lfirst(l),
+ top_level));
+ }
+ }
+ else if (IsA(node, FuncExpr))
+ {
+ FuncExpr *expr = (FuncExpr *) node;
+
+ if (func_strict(expr->funcid))
+ result = find_nonnullable_rels_walker((Node *) expr->args, false);
+ }
+ else if (IsA(node, OpExpr))
+ {
+ OpExpr *expr = (OpExpr *) node;
+
+ set_opfuncid(expr);
+ if (func_strict(expr->opfuncid))
+ result = find_nonnullable_rels_walker((Node *) expr->args, false);
+ }
+ else if (IsA(node, ScalarArrayOpExpr))
+ {
+ ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) node;
+
+ if (is_strict_saop(expr, true))
+ result = find_nonnullable_rels_walker((Node *) expr->args, false);
+ }
+ else if (IsA(node, BoolExpr))
+ {
+ BoolExpr *expr = (BoolExpr *) node;
+
+ switch (expr->boolop)
+ {
+ case AND_EXPR:
+ /* At top level we can just recurse (to the List case) */
+ if (top_level)
+ {
+ result = find_nonnullable_rels_walker((Node *) expr->args,
+ top_level);
+ break;
+ }
+
+ /*
+ * Below top level, even if one arm produces NULL, the result
+ * could be FALSE (hence not NULL). However, if *all* the
+ * arms produce NULL then the result is NULL, so we can take
+ * the intersection of the sets of nonnullable rels, just as
+ * for OR. Fall through to share code.
+ */
+ /* FALL THRU */
+ case OR_EXPR:
+
+ /*
+ * OR is strict if all of its arms are, so we can take the
+ * intersection of the sets of nonnullable rels for each arm.
+ * This works for both values of top_level.
+ */
+ foreach(l, expr->args)
+ {
+ Relids subresult;
+
+ subresult = find_nonnullable_rels_walker(lfirst(l),
+ top_level);
+ if (result == NULL) /* first subresult? */
+ result = subresult;
+ else
+ result = bms_int_members(result, subresult);
+
+ /*
+ * If the intersection is empty, we can stop looking. This
+ * also justifies the test for first-subresult above.
+ */
+ if (bms_is_empty(result))
+ break;
+ }
+ break;
+ case NOT_EXPR:
+ /* NOT will return null if its arg is null */
+ result = find_nonnullable_rels_walker((Node *) expr->args,
+ false);
+ break;
+ default:
+ elog(ERROR, "unrecognized boolop: %d", (int) expr->boolop);
+ break;
+ }
+ }
+ else if (IsA(node, RelabelType))
+ {
+ RelabelType *expr = (RelabelType *) node;
+
+ result = find_nonnullable_rels_walker((Node *) expr->arg, top_level);
+ }
+ else if (IsA(node, CoerceViaIO))
+ {
+ /* not clear this is useful, but it can't hurt */
+ CoerceViaIO *expr = (CoerceViaIO *) node;
+
+ result = find_nonnullable_rels_walker((Node *) expr->arg, top_level);
+ }
+ else if (IsA(node, ArrayCoerceExpr))
+ {
+ /* ArrayCoerceExpr is strict at the array level; ignore elemexpr */
+ ArrayCoerceExpr *expr = (ArrayCoerceExpr *) node;
+
+ result = find_nonnullable_rels_walker((Node *) expr->arg, top_level);
+ }
+ else if (IsA(node, ConvertRowtypeExpr))
+ {
+ /* not clear this is useful, but it can't hurt */
+ ConvertRowtypeExpr *expr = (ConvertRowtypeExpr *) node;
+
+ result = find_nonnullable_rels_walker((Node *) expr->arg, top_level);
+ }
+ else if (IsA(node, CollateExpr))
+ {
+ CollateExpr *expr = (CollateExpr *) node;
+
+ result = find_nonnullable_rels_walker((Node *) expr->arg, top_level);
+ }
+ else if (IsA(node, NullTest))
+ {
+ /* IS NOT NULL can be considered strict, but only at top level */
+ NullTest *expr = (NullTest *) node;
+
+ if (top_level && expr->nulltesttype == IS_NOT_NULL && !expr->argisrow)
+ result = find_nonnullable_rels_walker((Node *) expr->arg, false);
+ }
+ else if (IsA(node, BooleanTest))
+ {
+ /* Boolean tests that reject NULL are strict at top level */
+ BooleanTest *expr = (BooleanTest *) node;
+
+ if (top_level &&
+ (expr->booltesttype == IS_TRUE ||
+ expr->booltesttype == IS_FALSE ||
+ expr->booltesttype == IS_NOT_UNKNOWN))
+ result = find_nonnullable_rels_walker((Node *) expr->arg, false);
+ }
+ else if (IsA(node, PlaceHolderVar))
+ {
+ PlaceHolderVar *phv = (PlaceHolderVar *) node;
+
+ /*
+ * If the contained expression forces any rels non-nullable, so does
+ * the PHV.
+ */
+ result = find_nonnullable_rels_walker((Node *) phv->phexpr, top_level);
+
+ /*
+ * If the PHV's syntactic scope is exactly one rel, it will be forced
+ * to be evaluated at that rel, and so it will behave like a Var of
+ * that rel: if the rel's entire output goes to null, so will the PHV.
+ * (If the syntactic scope is a join, we know that the PHV will go to
+ * null if the whole join does; but that is AND semantics while we
+ * need OR semantics for find_nonnullable_rels' result, so we can't do
+ * anything with the knowledge.)
+ */
+ if (phv->phlevelsup == 0 &&
+ bms_membership(phv->phrels) == BMS_SINGLETON)
+ result = bms_add_members(result, phv->phrels);
+ }
+ return result;
+}
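+
+/*
+ * Editor's note: illustrative example, not part of the source.  For the
+ * clause "t1.x = t2.y", the equality operator's underlying function is
+ * strict, so both t1 and t2 are reported nonnullable: if either row is
+ * all-NULL, the clause cannot return TRUE.  This is the sort of knowledge
+ * used when deciding whether an outer join can be simplified to an inner
+ * join.
+ */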
+
+/*
+ * find_nonnullable_vars
+ * Determine which Vars are forced nonnullable by given clause.
+ *
+ * Returns a list of all level-zero Vars that are referenced in the clause in
+ * such a way that the clause cannot possibly return TRUE if any of these Vars
+ * is NULL. (It is OK to err on the side of conservatism; hence the analysis
+ * here is simplistic.)
+ *
+ * The semantics here are subtly different from contain_nonstrict_functions:
+ * that function is concerned with NULL results from arbitrary expressions,
+ * but here we assume that the input is a Boolean expression, and wish to
+ * see if NULL inputs will provably cause a FALSE-or-NULL result. We expect
+ * the expression to have been AND/OR flattened and converted to implicit-AND
+ * format.
+ *
+ * The result is a palloc'd List, but we have not copied the member Var nodes.
+ * Also, we don't bother trying to eliminate duplicate entries.
+ *
+ * top_level is true while scanning top-level AND/OR structure; here, showing
+ * the result is either FALSE or NULL is good enough. top_level is false when
+ * we have descended below a NOT or a strict function: now we must be able to
+ * prove that the subexpression goes to NULL.
+ *
+ * We don't use expression_tree_walker here because we don't want to descend
+ * through very many kinds of nodes; only the ones we can be sure are strict.
+ */
+List *
+find_nonnullable_vars(Node *clause)
+{
+ return find_nonnullable_vars_walker(clause, true);
+}
+
+static List *
+find_nonnullable_vars_walker(Node *node, bool top_level)
+{
+ List *result = NIL;
+ ListCell *l;
+
+ if (node == NULL)
+ return NIL;
+ if (IsA(node, Var))
+ {
+ Var *var = (Var *) node;
+
+ if (var->varlevelsup == 0)
+ result = list_make1(var);
+ }
+ else if (IsA(node, List))
+ {
+ /*
+ * At top level, we are examining an implicit-AND list: if any of the
+ * arms produces FALSE-or-NULL then the result is FALSE-or-NULL. If
+ * not at top level, we are examining the arguments of a strict
+ * function: if any of them produce NULL then the result of the
+ * function must be NULL. So in both cases, the set of nonnullable
+ * vars is the union of those found in the arms, and we pass down the
+ * top_level flag unmodified.
+ */
+ foreach(l, (List *) node)
+ {
+ result = list_concat(result,
+ find_nonnullable_vars_walker(lfirst(l),
+ top_level));
+ }
+ }
+ else if (IsA(node, FuncExpr))
+ {
+ FuncExpr *expr = (FuncExpr *) node;
+
+ if (func_strict(expr->funcid))
+ result = find_nonnullable_vars_walker((Node *) expr->args, false);
+ }
+ else if (IsA(node, OpExpr))
+ {
+ OpExpr *expr = (OpExpr *) node;
+
+ set_opfuncid(expr);
+ if (func_strict(expr->opfuncid))
+ result = find_nonnullable_vars_walker((Node *) expr->args, false);
+ }
+ else if (IsA(node, ScalarArrayOpExpr))
+ {
+ ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) node;
+
+ if (is_strict_saop(expr, true))
+ result = find_nonnullable_vars_walker((Node *) expr->args, false);
+ }
+ else if (IsA(node, BoolExpr))
+ {
+ BoolExpr *expr = (BoolExpr *) node;
+
+ switch (expr->boolop)
+ {
+ case AND_EXPR:
+ /* At top level we can just recurse (to the List case) */
+ if (top_level)
+ {
+ result = find_nonnullable_vars_walker((Node *) expr->args,
+ top_level);
+ break;
+ }
+
+ /*
+ * Below top level, even if one arm produces NULL, the result
+ * could be FALSE (hence not NULL). However, if *all* the
+ * arms produce NULL then the result is NULL, so we can take
+ * the intersection of the sets of nonnullable vars, just as
+ * for OR. Fall through to share code.
+ */
+ /* FALL THRU */
+ case OR_EXPR:
+
+ /*
+ * OR is strict if all of its arms are, so we can take the
+ * intersection of the sets of nonnullable vars for each arm.
+ * This works for both values of top_level.
+ */
+ foreach(l, expr->args)
+ {
+ List *subresult;
+
+ subresult = find_nonnullable_vars_walker(lfirst(l),
+ top_level);
+ if (result == NIL) /* first subresult? */
+ result = subresult;
+ else
+ result = list_intersection(result, subresult);
+
+ /*
+ * If the intersection is empty, we can stop looking. This
+ * also justifies the test for first-subresult above.
+ */
+ if (result == NIL)
+ break;
+ }
+ break;
+ case NOT_EXPR:
+ /* NOT will return null if its arg is null */
+ result = find_nonnullable_vars_walker((Node *) expr->args,
+ false);
+ break;
+ default:
+ elog(ERROR, "unrecognized boolop: %d", (int) expr->boolop);
+ break;
+ }
+ }
+ else if (IsA(node, RelabelType))
+ {
+ RelabelType *expr = (RelabelType *) node;
+
+ result = find_nonnullable_vars_walker((Node *) expr->arg, top_level);
+ }
+ else if (IsA(node, CoerceViaIO))
+ {
+ /* not clear this is useful, but it can't hurt */
+ CoerceViaIO *expr = (CoerceViaIO *) node;
+
+ result = find_nonnullable_vars_walker((Node *) expr->arg, false);
+ }
+ else if (IsA(node, ArrayCoerceExpr))
+ {
+ /* ArrayCoerceExpr is strict at the array level; ignore elemexpr */
+ ArrayCoerceExpr *expr = (ArrayCoerceExpr *) node;
+
+ result = find_nonnullable_vars_walker((Node *) expr->arg, top_level);
+ }
+ else if (IsA(node, ConvertRowtypeExpr))
+ {
+ /* not clear this is useful, but it can't hurt */
+ ConvertRowtypeExpr *expr = (ConvertRowtypeExpr *) node;
+
+ result = find_nonnullable_vars_walker((Node *) expr->arg, top_level);
+ }
+ else if (IsA(node, CollateExpr))
+ {
+ CollateExpr *expr = (CollateExpr *) node;
+
+ result = find_nonnullable_vars_walker((Node *) expr->arg, top_level);
+ }
+ else if (IsA(node, NullTest))
+ {
+ /* IS NOT NULL can be considered strict, but only at top level */
+ NullTest *expr = (NullTest *) node;
+
+ if (top_level && expr->nulltesttype == IS_NOT_NULL && !expr->argisrow)
+ result = find_nonnullable_vars_walker((Node *) expr->arg, false);
+ }
+ else if (IsA(node, BooleanTest))
+ {
+ /* Boolean tests that reject NULL are strict at top level */
+ BooleanTest *expr = (BooleanTest *) node;
+
+ if (top_level &&
+ (expr->booltesttype == IS_TRUE ||
+ expr->booltesttype == IS_FALSE ||
+ expr->booltesttype == IS_NOT_UNKNOWN))
+ result = find_nonnullable_vars_walker((Node *) expr->arg, false);
+ }
+ else if (IsA(node, PlaceHolderVar))
+ {
+ PlaceHolderVar *phv = (PlaceHolderVar *) node;
+
+ result = find_nonnullable_vars_walker((Node *) phv->phexpr, top_level);
+ }
+ return result;
+}
+
+/*
+ * find_forced_null_vars
+ * Determine which Vars must be NULL for the given clause to return TRUE.
+ *
+ * This is the complement of find_nonnullable_vars: find the level-zero Vars
+ * that must be NULL for the clause to return TRUE. (It is OK to err on the
+ * side of conservatism; hence the analysis here is simplistic. In fact,
+ * we only detect simple "var IS NULL" tests at the top level.)
+ *
+ * The result is a palloc'd List, but we have not copied the member Var nodes.
+ * Also, we don't bother trying to eliminate duplicate entries.
+ */
+List *
+find_forced_null_vars(Node *node)
+{
+ List *result = NIL;
+ Var *var;
+ ListCell *l;
+
+ if (node == NULL)
+ return NIL;
+ /* Check single-clause cases using subroutine */
+ var = find_forced_null_var(node);
+ if (var)
+ {
+ result = list_make1(var);
+ }
+ /* Otherwise, handle AND-conditions */
+ else if (IsA(node, List))
+ {
+ /*
+ * We are examining an implicit-AND list: the clause can yield TRUE
+ * only if every arm yields TRUE, so the forced-null Vars are the
+ * union of those found in the arms.
+ */
+ foreach(l, (List *) node)
+ {
+ result = list_concat(result,
+ find_forced_null_vars(lfirst(l)));
+ }
+ }
+ else if (IsA(node, BoolExpr))
+ {
+ BoolExpr *expr = (BoolExpr *) node;
+
+ /*
+ * We don't bother considering the OR case, because it's fairly
+ * unlikely anyone would write "v1 IS NULL OR v1 IS NULL". Likewise,
+ * the NOT case isn't worth expending code on.
+ */
+ if (expr->boolop == AND_EXPR)
+ {
+ /* At top level we can just recurse (to the List case) */
+ result = find_forced_null_vars((Node *) expr->args);
+ }
+ }
+ return result;
+}
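+
+/*
+ * For illustration (hypothetical clauses): applied to the implicit-AND list
+ * built from
+ *		WHERE x IS NULL AND y IS NULL
+ * find_forced_null_vars() returns a list mentioning both x and y, whereas for
+ *		WHERE x IS NULL OR y IS NULL
+ * it returns NIL, since the OR case is deliberately not analyzed.
+ */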
+
+/*
+ * find_forced_null_var
+ * Return the Var forced null by the given clause, or NULL if it's
+ * not an IS NULL-type clause. For success, the clause must enforce
+ * *only* nullness of the particular Var, not any other conditions.
+ *
+ * This is just the single-clause case of find_forced_null_vars(), without
+ * any allowance for AND conditions. It's used by initsplan.c on individual
+ * qual clauses. The reason for not just applying find_forced_null_vars()
+ * is that if an AND of an IS NULL clause with something else were to somehow
+ * survive AND/OR flattening, initsplan.c might get fooled into discarding
+ * the whole clause when only the IS NULL part of it had been proved redundant.
+ */
+Var *
+find_forced_null_var(Node *node)
+{
+ if (node == NULL)
+ return NULL;
+ if (IsA(node, NullTest))
+ {
+ /* check for var IS NULL */
+ NullTest *expr = (NullTest *) node;
+
+ if (expr->nulltesttype == IS_NULL && !expr->argisrow)
+ {
+ Var *var = (Var *) expr->arg;
+
+ if (var && IsA(var, Var) &&
+ var->varlevelsup == 0)
+ return var;
+ }
+ }
+ else if (IsA(node, BooleanTest))
+ {
+ /* var IS UNKNOWN is equivalent to var IS NULL */
+ BooleanTest *expr = (BooleanTest *) node;
+
+ if (expr->booltesttype == IS_UNKNOWN)
+ {
+ Var *var = (Var *) expr->arg;
+
+ if (var && IsA(var, Var) &&
+ var->varlevelsup == 0)
+ return var;
+ }
+ }
+ return NULL;
+}
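+
+/*
+ * For illustration (hypothetical clauses): find_forced_null_var() returns the
+ * Var for "v IS NULL" or "v IS UNKNOWN", but returns NULL for a clause such
+ * as "v IS NULL AND q", which enforces more than just the nullness of v.
+ */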
+
+/*
+ * Can we treat a ScalarArrayOpExpr as strict?
+ *
+ * If "falseOK" is true, then a "false" result can be considered strict,
+ * else we need to guarantee an actual NULL result for NULL input.
+ *
+ * "foo op ALL array" is strict if the op is strict *and* we can prove
+ * that the array input isn't an empty array. We can check that
+ * for the cases of an array constant and an ARRAY[] construct.
+ *
+ * "foo op ANY array" is strict in the falseOK sense if the op is strict.
+ * If not falseOK, the test is the same as for "foo op ALL array".
+ */
+static bool
+is_strict_saop(ScalarArrayOpExpr *expr, bool falseOK)
+{
+ Node *rightop;
+
+ /* The contained operator must be strict. */
+ set_sa_opfuncid(expr);
+ if (!func_strict(expr->opfuncid))
+ return false;
+ /* If ANY and falseOK, that's all we need to check. */
+ if (expr->useOr && falseOK)
+ return true;
+ /* Else, we have to see if the array is provably non-empty. */
+ Assert(list_length(expr->args) == 2);
+ rightop = (Node *) lsecond(expr->args);
+ if (rightop && IsA(rightop, Const))
+ {
+ Datum arraydatum = ((Const *) rightop)->constvalue;
+ bool arrayisnull = ((Const *) rightop)->constisnull;
+ ArrayType *arrayval;
+ int nitems;
+
+ if (arrayisnull)
+ return false;
+ arrayval = DatumGetArrayTypeP(arraydatum);
+ nitems = ArrayGetNItems(ARR_NDIM(arrayval), ARR_DIMS(arrayval));
+ if (nitems > 0)
+ return true;
+ }
+ else if (rightop && IsA(rightop, ArrayExpr))
+ {
+ ArrayExpr *arrayexpr = (ArrayExpr *) rightop;
+
+ if (arrayexpr->elements != NIL && !arrayexpr->multidims)
+ return true;
+ }
+ return false;
+}
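+
+/*
+ * For illustration (hypothetical quals): "x = ANY (ARRAY[1,2,3])" counts as
+ * strict in the falseOK sense, because "=" is strict and useOr is true.
+ * "x = ALL ($1)" does not, because the array input is neither an array
+ * constant nor an ARRAY[] construct, so we cannot prove it is non-empty.
+ */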
+
+
+/*****************************************************************************
+ * Check for "pseudo-constant" clauses
+ *****************************************************************************/
+
+/*
+ * is_pseudo_constant_clause
+ * Detect whether an expression is "pseudo constant", ie, it contains no
+ * variables of the current query level and no uses of volatile functions.
+ * Such an expr is not necessarily a true constant: it can still contain
+ * Params and outer-level Vars, not to mention functions whose results
+ * may vary from one statement to the next. However, the expr's value
+ * will be constant over any one scan of the current query, so it can be
+ * used as, eg, an indexscan key. (Actually, the condition for indexscan
+ * keys is weaker than this; see is_pseudo_constant_for_index().)
+ *
+ * CAUTION: this function omits to test for one very important class of
+ * not-constant expressions, namely aggregates (Aggrefs). In current usage
+ * this is only applied to WHERE clauses and so a check for Aggrefs would be
+ * a waste of cycles; but be sure to also check contain_agg_clause() if you
+ * want to know about pseudo-constness in other contexts. The same goes
+ * for window functions (WindowFuncs).
+ */
+bool
+is_pseudo_constant_clause(Node *clause)
+{
+ /*
+ * We could implement this check in one recursive scan. But since the
+ * check for volatile functions is both moderately expensive and unlikely
+ * to fail, it seems better to look for Vars first and only check for
+ * volatile functions if we find no Vars.
+ */
+ if (!contain_var_clause(clause) &&
+ !contain_volatile_functions(clause))
+ return true;
+ return false;
+}
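+
+/*
+ * For illustration (hypothetical expressions): "$1 + 10", which contains only
+ * a Param and a constant, satisfies is_pseudo_constant_clause(), whereas
+ * "random() < 0.5" does not, because random() is volatile.
+ */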
+
+/*
+ * is_pseudo_constant_clause_relids
+ * Same as above, except caller already has available the var membership
+ * of the expression; this lets us avoid the contain_var_clause() scan.
+ */
+bool
+is_pseudo_constant_clause_relids(Node *clause, Relids relids)
+{
+ if (bms_is_empty(relids) &&
+ !contain_volatile_functions(clause))
+ return true;
+ return false;
+}
+
+
+/*****************************************************************************
+ * *
+ * General clause-manipulating routines *
+ * *
+ *****************************************************************************/
+
+/*
+ * NumRelids
+ * (formerly clause_relids)
+ *
+ * Returns the number of different relations referenced in 'clause'.
+ */
+int
+NumRelids(PlannerInfo *root, Node *clause)
+{
+ Relids varnos = pull_varnos(root, clause);
+ int result = bms_num_members(varnos);
+
+ bms_free(varnos);
+ return result;
+}
+
+/*
+ * CommuteOpExpr: commute a binary operator clause
+ *
+ * XXX the clause is destructively modified!
+ */
+void
+CommuteOpExpr(OpExpr *clause)
+{
+ Oid opoid;
+ Node *temp;
+
+ /* Sanity checks: caller is at fault if these fail */
+ if (!is_opclause(clause) ||
+ list_length(clause->args) != 2)
+ elog(ERROR, "cannot commute non-binary-operator clause");
+
+ opoid = get_commutator(clause->opno);
+
+ if (!OidIsValid(opoid))
+ elog(ERROR, "could not find commutator for operator %u",
+ clause->opno);
+
+ /*
+ * modify the clause in-place!
+ */
+ clause->opno = opoid;
+ clause->opfuncid = InvalidOid;
+ /* opresulttype, opretset, opcollid, inputcollid need not change */
+
+ temp = linitial(clause->args);
+ linitial(clause->args) = lsecond(clause->args);
+ lsecond(clause->args) = temp;
+}
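+
+/*
+ * Rough usage sketch (hypothetical clause): given an OpExpr representing
+ * "8 < x", a caller that wants the Var on the left can do
+ *		CommuteOpExpr(clause);
+ * after which the clause reads "x > 8": opno is replaced by its commutator
+ * and the two arguments are swapped in place.  An error is raised if the
+ * operator has no commutator.
+ */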
+
+/*
+ * Helper for eval_const_expressions: check that datatype of an attribute
+ * is still what it was when the expression was parsed. This is needed to
+ * guard against improper simplification after ALTER COLUMN TYPE. (XXX we
+ * may well need to make similar checks elsewhere?)
+ *
+ * rowtypeid may come from a whole-row Var, and therefore it can be a domain
+ * over composite, but for this purpose we only care about checking the type
+ * of a contained field.
+ */
+static bool
+rowtype_field_matches(Oid rowtypeid, int fieldnum,
+ Oid expectedtype, int32 expectedtypmod,
+ Oid expectedcollation)
+{
+ TupleDesc tupdesc;
+ Form_pg_attribute attr;
+
+ /* No issue for RECORD, since there is no way to ALTER such a type */
+ if (rowtypeid == RECORDOID)
+ return true;
+ tupdesc = lookup_rowtype_tupdesc_domain(rowtypeid, -1, false);
+ if (fieldnum <= 0 || fieldnum > tupdesc->natts)
+ {
+ ReleaseTupleDesc(tupdesc);
+ return false;
+ }
+ attr = TupleDescAttr(tupdesc, fieldnum - 1);
+ if (attr->attisdropped ||
+ attr->atttypid != expectedtype ||
+ attr->atttypmod != expectedtypmod ||
+ attr->attcollation != expectedcollation)
+ {
+ ReleaseTupleDesc(tupdesc);
+ return false;
+ }
+ ReleaseTupleDesc(tupdesc);
+ return true;
+}
+
+
+/*--------------------
+ * eval_const_expressions
+ *
+ * Reduce any recognizably constant subexpressions of the given
+ * expression tree, for example "2 + 2" => "4". More interestingly,
+ * we can reduce certain boolean expressions even when they contain
+ * non-constant subexpressions: "x OR true" => "true" no matter what
+ * the subexpression x is. (XXX We assume that no such subexpression
+ * will have important side-effects, which is not necessarily a good
+ * assumption in the presence of user-defined functions; do we need a
+ * pg_proc flag that prevents discarding the execution of a function?)
+ *
+ * We do understand that certain functions may deliver non-constant
+ * results even with constant inputs, "nextval()" being the classic
+ * example. Functions that are not marked "immutable" in pg_proc
+ * will not be pre-evaluated here, although we will reduce their
+ * arguments as far as possible.
+ *
+ * Whenever a function is eliminated from the expression by means of
+ * constant-expression evaluation or inlining, we add the function to
+ * root->glob->invalItems. This ensures the plan is known to depend on
+ * such functions, even though they aren't referenced anymore.
+ *
+ * We assume that the tree has already been type-checked and contains
+ * only operators and functions that are reasonable to try to execute.
+ *
+ * NOTE: "root" can be passed as NULL if the caller never wants to do any
+ * Param substitutions nor receive info about inlined functions.
+ *
+ * NOTE: the planner assumes that this will always flatten nested AND and
+ * OR clauses into N-argument form. See comments in prepqual.c.
+ *
+ * NOTE: another critical effect is that any function calls that require
+ * default arguments will be expanded, and named-argument calls will be
+ * converted to positional notation. The executor won't handle either.
+ *--------------------
+ */
+Node *
+eval_const_expressions(PlannerInfo *root, Node *node)
+{
+ eval_const_expressions_context context;
+
+ if (root)
+ context.boundParams = root->glob->boundParams; /* bound Params */
+ else
+ context.boundParams = NULL;
+ context.root = root; /* for inlined-function dependencies */
+ context.active_fns = NIL; /* nothing being recursively simplified */
+ context.case_val = NULL; /* no CASE being examined */
+ context.estimate = false; /* safe transformations only */
+ return eval_const_expressions_mutator(node, &context);
+}
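+
+/*
+ * Rough usage sketch (hypothetical qual): for a qual equivalent to
+ * "x OR (2 + 2 = 5)",
+ *		Node	   *simplified = eval_const_expressions(root, qual);
+ * folds "2 + 2 = 5" to constant FALSE and drops it from the OR, leaving just
+ * "x".  Passing root as NULL is acceptable when no Param substitution or
+ * inlined-function bookkeeping is wanted.
+ */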
+
+#define MIN_ARRAY_SIZE_FOR_HASHED_SAOP 9
+/*--------------------
+ * convert_saop_to_hashed_saop
+ *
+ * Recursively search 'node' for ScalarArrayOpExprs and fill in the hash
+ * function for any ScalarArrayOpExpr that looks like it would be useful to
+ * evaluate using a hash table rather than a linear search.
+ *
+ * We'll use a hash table if all of the following conditions are met:
+ * 1. The array (the ScalarArrayOpExpr's 2nd argument) is a non-null Const.
+ * 2. useOr is true, or there is a valid negator operator for the
+ *    ScalarArrayOpExpr's opno.
+ * 3. There's a valid hash function for both the left and righthand operands,
+ *    and these hash functions are the same.
+ * 4. The array contains enough elements for a hash table to be worthwhile
+ *    compared to a linear search.
+ */
+void
+convert_saop_to_hashed_saop(Node *node)
+{
+ (void) convert_saop_to_hashed_saop_walker(node, NULL);
+}
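+
+/*
+ * For illustration (hypothetical quals): in
+ *		x = ANY ('{1,2,3,4,5,6,7,8,9}'::int4[])
+ * the array is a non-null Const with at least MIN_ARRAY_SIZE_FOR_HASHED_SAOP
+ * elements and the integer "=" operator has matching hash functions, so
+ * hashfuncid gets filled in.  For a NOT IN written as "x <> ALL (...)", the
+ * negator of "<>" must be hashable, and negfuncid is filled in as well.
+ */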
+
+static bool
+convert_saop_to_hashed_saop_walker(Node *node, void *context)
+{
+ if (node == NULL)
+ return false;
+
+ if (IsA(node, ScalarArrayOpExpr))
+ {
+ ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) node;
+ Expr *arrayarg = (Expr *) lsecond(saop->args);
+ Oid lefthashfunc;
+ Oid righthashfunc;
+
+ if (arrayarg && IsA(arrayarg, Const) &&
+ !((Const *) arrayarg)->constisnull)
+ {
+ if (saop->useOr)
+ {
+ if (get_op_hash_functions(saop->opno, &lefthashfunc, &righthashfunc) &&
+ lefthashfunc == righthashfunc)
+ {
+ Datum arrdatum = ((Const *) arrayarg)->constvalue;
+ ArrayType *arr = (ArrayType *) DatumGetPointer(arrdatum);
+ int nitems;
+
+ /*
+ * Only fill in the hash functions if the array looks
+ * large enough for it to be worth hashing instead of
+ * doing a linear search.
+ */
+ nitems = ArrayGetNItems(ARR_NDIM(arr), ARR_DIMS(arr));
+
+ if (nitems >= MIN_ARRAY_SIZE_FOR_HASHED_SAOP)
+ {
+ /* Looks good. Fill in the hash functions */
+ saop->hashfuncid = lefthashfunc;
+ }
+ return true;
+ }
+ }
+ else /* !saop->useOr */
+ {
+ Oid negator = get_negator(saop->opno);
+
+ /*
+ * Check if this is a NOT IN using an operator whose negator
+ * is hashable. If so we can still build a hash table and
+ * just ensure the lookup items are not in the hash table.
+ */
+ if (OidIsValid(negator) &&
+ get_op_hash_functions(negator, &lefthashfunc, &righthashfunc) &&
+ lefthashfunc == righthashfunc)
+ {
+ Datum arrdatum = ((Const *) arrayarg)->constvalue;
+ ArrayType *arr = (ArrayType *) DatumGetPointer(arrdatum);
+ int nitems;
+
+ /*
+ * Only fill in the hash functions if the array looks
+ * large enough for it to be worth hashing instead of
+ * doing a linear search.
+ */
+ nitems = ArrayGetNItems(ARR_NDIM(arr), ARR_DIMS(arr));
+
+ if (nitems >= MIN_ARRAY_SIZE_FOR_HASHED_SAOP)
+ {
+ /* Looks good. Fill in the hash functions */
+ saop->hashfuncid = lefthashfunc;
+
+ /*
+ * Also set the negfuncid. The executor will need
+ * that to perform hashtable lookups.
+ */
+ saop->negfuncid = get_opcode(negator);
+ }
+ return true;
+ }
+ }
+ }
+ }
+
+ return expression_tree_walker(node, convert_saop_to_hashed_saop_walker, NULL);
+}
+
+
+/*--------------------
+ * estimate_expression_value
+ *
+ * This function attempts to estimate the value of an expression for
+ * planning purposes. It is in essence a more aggressive version of
+ * eval_const_expressions(): we will perform constant reductions that are
+ * not necessarily 100% safe, but are reasonable for estimation purposes.
+ *
+ * Currently the extra steps that are taken in this mode are:
+ * 1. Substitute values for Params, where a bound Param value has been made
+ * available by the caller of planner(), even if the Param isn't marked
+ * constant. This effectively means that we plan using the first supplied
+ * value of the Param.
+ * 2. Fold stable, as well as immutable, functions to constants.
+ * 3. Reduce PlaceHolderVar nodes to their contained expressions.
+ *--------------------
+ */
+Node *
+estimate_expression_value(PlannerInfo *root, Node *node)
+{
+ eval_const_expressions_context context;
+
+ context.boundParams = root->glob->boundParams; /* bound Params */
+ /* we do not need to mark the plan as depending on inlined functions */
+ context.root = NULL;
+ context.active_fns = NIL; /* nothing being recursively simplified */
+ context.case_val = NULL; /* no CASE being examined */
+ context.estimate = true; /* unsafe transformations OK */
+ return eval_const_expressions_mutator(node, &context);
+}
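+
+/*
+ * Rough usage sketch (hypothetical qual): for "ts > $1", where the caller of
+ * planner() supplied a bound value for $1,
+ *		Node	   *est = estimate_expression_value(root, qual);
+ * substitutes that value even though the Param isn't marked constant, and
+ * also folds stable functions such as now().  The result is suitable only
+ * for estimation, not for execution.
+ */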
+
+/*
+ * The generic case in eval_const_expressions_mutator is to recurse using
+ * expression_tree_mutator, which will copy the given node unchanged but
+ * const-simplify its arguments (if any) as far as possible. If the node
+ * itself does immutable processing, and each of its arguments were reduced
+ * to a Const, we can then reduce it to a Const using evaluate_expr. (Some
+ * node types need more complicated logic; for example, a CASE expression
+ * might be reducible to a constant even if not all its subtrees are.)
+ */
+#define ece_generic_processing(node) \
+ expression_tree_mutator((Node *) (node), eval_const_expressions_mutator, \
+ (void *) context)
+
+/*
+ * Check whether all arguments of the given node were reduced to Consts.
+ * By going directly to expression_tree_walker, contain_non_const_walker
+ * is not applied to the node itself, only to its children.
+ */
+#define ece_all_arguments_const(node) \
+ (!expression_tree_walker((Node *) (node), contain_non_const_walker, NULL))
+
+/* Generic macro for applying evaluate_expr */
+#define ece_evaluate_expr(node) \
+ ((Node *) evaluate_expr((Expr *) (node), \
+ exprType((Node *) (node)), \
+ exprTypmod((Node *) (node)), \
+ exprCollation((Node *) (node))))
+
+/*
+ * Recursive guts of eval_const_expressions/estimate_expression_value
+ */
+static Node *
+eval_const_expressions_mutator(Node *node,
+ eval_const_expressions_context *context)
+{
+ if (node == NULL)
+ return NULL;
+ switch (nodeTag(node))
+ {
+ case T_Param:
+ {
+ Param *param = (Param *) node;
+ ParamListInfo paramLI = context->boundParams;
+
+ /* Look to see if we've been given a value for this Param */
+ if (param->paramkind == PARAM_EXTERN &&
+ paramLI != NULL &&
+ param->paramid > 0 &&
+ param->paramid <= paramLI->numParams)
+ {
+ ParamExternData *prm;
+ ParamExternData prmdata;
+
+ /*
+ * Give hook a chance in case parameter is dynamic. Tell
+ * it that this fetch is speculative, so it should avoid
+ * erroring out if parameter is unavailable.
+ */
+ if (paramLI->paramFetch != NULL)
+ prm = paramLI->paramFetch(paramLI, param->paramid,
+ true, &prmdata);
+ else
+ prm = &paramLI->params[param->paramid - 1];
+
+ /*
+ * We don't just check OidIsValid, but insist that the
+ * fetched type match the Param, just in case the hook did
+ * something unexpected. No need to throw an error here
+ * though; leave that for runtime.
+ */
+ if (OidIsValid(prm->ptype) &&
+ prm->ptype == param->paramtype)
+ {
+ /* OK to substitute parameter value? */
+ if (context->estimate ||
+ (prm->pflags & PARAM_FLAG_CONST))
+ {
+ /*
+ * Return a Const representing the param value.
+ * Must copy pass-by-ref datatypes, since the
+ * Param might be in a memory context
+ * shorter-lived than our output plan should be.
+ */
+ int16 typLen;
+ bool typByVal;
+ Datum pval;
+ Const *con;
+
+ get_typlenbyval(param->paramtype,
+ &typLen, &typByVal);
+ if (prm->isnull || typByVal)
+ pval = prm->value;
+ else
+ pval = datumCopy(prm->value, typByVal, typLen);
+ con = makeConst(param->paramtype,
+ param->paramtypmod,
+ param->paramcollid,
+ (int) typLen,
+ pval,
+ prm->isnull,
+ typByVal);
+ con->location = param->location;
+ return (Node *) con;
+ }
+ }
+ }
+
+ /*
+ * Not replaceable, so just copy the Param (no need to
+ * recurse)
+ */
+ return (Node *) copyObject(param);
+ }
+ case T_WindowFunc:
+ {
+ WindowFunc *expr = (WindowFunc *) node;
+ Oid funcid = expr->winfnoid;
+ List *args;
+ Expr *aggfilter;
+ HeapTuple func_tuple;
+ WindowFunc *newexpr;
+
+ /*
+ * We can't really simplify a WindowFunc node, but we mustn't
+ * just fall through to the default processing, because we
+ * have to apply expand_function_arguments to its argument
+ * list. That takes care of inserting default arguments and
+ * expanding named-argument notation.
+ */
+ func_tuple = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid));
+ if (!HeapTupleIsValid(func_tuple))
+ elog(ERROR, "cache lookup failed for function %u", funcid);
+
+ args = expand_function_arguments(expr->args,
+ false, expr->wintype,
+ func_tuple);
+
+ ReleaseSysCache(func_tuple);
+
+ /* Now, recursively simplify the args (which are a List) */
+ args = (List *)
+ expression_tree_mutator((Node *) args,
+ eval_const_expressions_mutator,
+ (void *) context);
+ /* ... and the filter expression, which isn't */
+ aggfilter = (Expr *)
+ eval_const_expressions_mutator((Node *) expr->aggfilter,
+ context);
+
+ /* And build the replacement WindowFunc node */
+ newexpr = makeNode(WindowFunc);
+ newexpr->winfnoid = expr->winfnoid;
+ newexpr->wintype = expr->wintype;
+ newexpr->wincollid = expr->wincollid;
+ newexpr->inputcollid = expr->inputcollid;
+ newexpr->args = args;
+ newexpr->aggfilter = aggfilter;
+ newexpr->winref = expr->winref;
+ newexpr->winstar = expr->winstar;
+ newexpr->winagg = expr->winagg;
+ newexpr->location = expr->location;
+
+ return (Node *) newexpr;
+ }
+ case T_FuncExpr:
+ {
+ FuncExpr *expr = (FuncExpr *) node;
+ List *args = expr->args;
+ Expr *simple;
+ FuncExpr *newexpr;
+
+ /*
+ * Code for op/func reduction is pretty bulky, so split it out
+ * as a separate function. Note: exprTypmod normally returns
+ * -1 for a FuncExpr, but not when the node is recognizably a
+ * length coercion; we want to preserve the typmod in the
+ * eventual Const if so.
+ */
+ simple = simplify_function(expr->funcid,
+ expr->funcresulttype,
+ exprTypmod(node),
+ expr->funccollid,
+ expr->inputcollid,
+ &args,
+ expr->funcvariadic,
+ true,
+ true,
+ context);
+ if (simple) /* successfully simplified it */
+ return (Node *) simple;
+
+ /*
+ * The expression cannot be simplified any further, so build
+ * and return a replacement FuncExpr node using the
+ * possibly-simplified arguments. Note that we have also
+ * converted the argument list to positional notation.
+ */
+ newexpr = makeNode(FuncExpr);
+ newexpr->funcid = expr->funcid;
+ newexpr->funcresulttype = expr->funcresulttype;
+ newexpr->funcretset = expr->funcretset;
+ newexpr->funcvariadic = expr->funcvariadic;
+ newexpr->funcformat = expr->funcformat;
+ newexpr->funccollid = expr->funccollid;
+ newexpr->inputcollid = expr->inputcollid;
+ newexpr->args = args;
+ newexpr->location = expr->location;
+ return (Node *) newexpr;
+ }
+ case T_OpExpr:
+ {
+ OpExpr *expr = (OpExpr *) node;
+ List *args = expr->args;
+ Expr *simple;
+ OpExpr *newexpr;
+
+ /*
+ * Need to get OID of underlying function. Okay to scribble
+ * on input to this extent.
+ */
+ set_opfuncid(expr);
+
+ /*
+ * Code for op/func reduction is pretty bulky, so split it out
+ * as a separate function.
+ */
+ simple = simplify_function(expr->opfuncid,
+ expr->opresulttype, -1,
+ expr->opcollid,
+ expr->inputcollid,
+ &args,
+ false,
+ true,
+ true,
+ context);
+ if (simple) /* successfully simplified it */
+ return (Node *) simple;
+
+ /*
+ * If the operator is boolean equality or inequality, we know
+ * how to simplify cases involving one constant and one
+ * non-constant argument.
+ */
+ if (expr->opno == BooleanEqualOperator ||
+ expr->opno == BooleanNotEqualOperator)
+ {
+ simple = (Expr *) simplify_boolean_equality(expr->opno,
+ args);
+ if (simple) /* successfully simplified it */
+ return (Node *) simple;
+ }
+
+ /*
+ * The expression cannot be simplified any further, so build
+ * and return a replacement OpExpr node using the
+ * possibly-simplified arguments.
+ */
+ newexpr = makeNode(OpExpr);
+ newexpr->opno = expr->opno;
+ newexpr->opfuncid = expr->opfuncid;
+ newexpr->opresulttype = expr->opresulttype;
+ newexpr->opretset = expr->opretset;
+ newexpr->opcollid = expr->opcollid;
+ newexpr->inputcollid = expr->inputcollid;
+ newexpr->args = args;
+ newexpr->location = expr->location;
+ return (Node *) newexpr;
+ }
+ case T_DistinctExpr:
+ {
+ DistinctExpr *expr = (DistinctExpr *) node;
+ List *args;
+ ListCell *arg;
+ bool has_null_input = false;
+ bool all_null_input = true;
+ bool has_nonconst_input = false;
+ Expr *simple;
+ DistinctExpr *newexpr;
+
+ /*
+ * Reduce constants in the DistinctExpr's arguments. We know
+ * args is either NIL or a List node, so we can call
+ * expression_tree_mutator directly rather than recursing to
+ * self.
+ */
+ args = (List *) expression_tree_mutator((Node *) expr->args,
+ eval_const_expressions_mutator,
+ (void *) context);
+
+ /*
+ * We must do our own check for NULLs because DistinctExpr has
+ * different results for NULL input than the underlying
+ * operator does.
+ */
+ foreach(arg, args)
+ {
+ if (IsA(lfirst(arg), Const))
+ {
+ has_null_input |= ((Const *) lfirst(arg))->constisnull;
+ all_null_input &= ((Const *) lfirst(arg))->constisnull;
+ }
+ else
+ has_nonconst_input = true;
+ }
+
+ /* all constants? then can optimize this out */
+ if (!has_nonconst_input)
+ {
+ /* all nulls? then not distinct */
+ if (all_null_input)
+ return makeBoolConst(false, false);
+
+ /* one null? then distinct */
+ if (has_null_input)
+ return makeBoolConst(true, false);
+
+ /* otherwise try to evaluate the '=' operator */
+ /* (NOT okay to try to inline it, though!) */
+
+ /*
+ * Need to get OID of underlying function. Okay to
+ * scribble on input to this extent.
+ */
+ set_opfuncid((OpExpr *) expr); /* rely on struct
+ * equivalence */
+
+ /*
+ * Code for op/func reduction is pretty bulky, so split it
+ * out as a separate function.
+ */
+ simple = simplify_function(expr->opfuncid,
+ expr->opresulttype, -1,
+ expr->opcollid,
+ expr->inputcollid,
+ &args,
+ false,
+ false,
+ false,
+ context);
+ if (simple) /* successfully simplified it */
+ {
+ /*
+ * Since the underlying operator is "=", must negate
+ * its result
+ */
+ Const *csimple = castNode(Const, simple);
+
+ csimple->constvalue =
+ BoolGetDatum(!DatumGetBool(csimple->constvalue));
+ return (Node *) csimple;
+ }
+ }
+
+ /*
+ * The expression cannot be simplified any further, so build
+ * and return a replacement DistinctExpr node using the
+ * possibly-simplified arguments.
+ */
+ newexpr = makeNode(DistinctExpr);
+ newexpr->opno = expr->opno;
+ newexpr->opfuncid = expr->opfuncid;
+ newexpr->opresulttype = expr->opresulttype;
+ newexpr->opretset = expr->opretset;
+ newexpr->opcollid = expr->opcollid;
+ newexpr->inputcollid = expr->inputcollid;
+ newexpr->args = args;
+ newexpr->location = expr->location;
+ return (Node *) newexpr;
+ }
+ case T_NullIfExpr:
+ {
+ NullIfExpr *expr;
+ ListCell *arg;
+ bool has_nonconst_input = false;
+
+ /* Copy the node and const-simplify its arguments */
+ expr = (NullIfExpr *) ece_generic_processing(node);
+
+ /* If either argument is NULL they can't be equal */
+ foreach(arg, expr->args)
+ {
+ if (!IsA(lfirst(arg), Const))
+ has_nonconst_input = true;
+ else if (((Const *) lfirst(arg))->constisnull)
+ return (Node *) linitial(expr->args);
+ }
+
+ /*
+ * Need to get OID of underlying function before checking if
+ * the function is OK to evaluate.
+ */
+ set_opfuncid((OpExpr *) expr);
+
+ if (!has_nonconst_input &&
+ ece_function_is_safe(expr->opfuncid, context))
+ return ece_evaluate_expr(expr);
+
+ return (Node *) expr;
+ }
+ case T_ScalarArrayOpExpr:
+ {
+ ScalarArrayOpExpr *saop;
+
+ /* Copy the node and const-simplify its arguments */
+ saop = (ScalarArrayOpExpr *) ece_generic_processing(node);
+
+ /* Make sure we know underlying function */
+ set_sa_opfuncid(saop);
+
+ /*
+ * If all arguments are Consts, and it's a safe function, we
+ * can fold to a constant
+ */
+ if (ece_all_arguments_const(saop) &&
+ ece_function_is_safe(saop->opfuncid, context))
+ return ece_evaluate_expr(saop);
+ return (Node *) saop;
+ }
+ case T_BoolExpr:
+ {
+ BoolExpr *expr = (BoolExpr *) node;
+
+ switch (expr->boolop)
+ {
+ case OR_EXPR:
+ {
+ List *newargs;
+ bool haveNull = false;
+ bool forceTrue = false;
+
+ newargs = simplify_or_arguments(expr->args,
+ context,
+ &haveNull,
+ &forceTrue);
+ if (forceTrue)
+ return makeBoolConst(true, false);
+ if (haveNull)
+ newargs = lappend(newargs,
+ makeBoolConst(false, true));
+ /* If all the inputs are FALSE, result is FALSE */
+ if (newargs == NIL)
+ return makeBoolConst(false, false);
+
+ /*
+ * If only one nonconst-or-NULL input, it's the
+ * result
+ */
+ if (list_length(newargs) == 1)
+ return (Node *) linitial(newargs);
+ /* Else we still need an OR node */
+ return (Node *) make_orclause(newargs);
+ }
+ case AND_EXPR:
+ {
+ List *newargs;
+ bool haveNull = false;
+ bool forceFalse = false;
+
+ newargs = simplify_and_arguments(expr->args,
+ context,
+ &haveNull,
+ &forceFalse);
+ if (forceFalse)
+ return makeBoolConst(false, false);
+ if (haveNull)
+ newargs = lappend(newargs,
+ makeBoolConst(false, true));
+ /* If all the inputs are TRUE, result is TRUE */
+ if (newargs == NIL)
+ return makeBoolConst(true, false);
+
+ /*
+ * If only one nonconst-or-NULL input, it's the
+ * result
+ */
+ if (list_length(newargs) == 1)
+ return (Node *) linitial(newargs);
+ /* Else we still need an AND node */
+ return (Node *) make_andclause(newargs);
+ }
+ case NOT_EXPR:
+ {
+ Node *arg;
+
+ Assert(list_length(expr->args) == 1);
+ arg = eval_const_expressions_mutator(linitial(expr->args),
+ context);
+
+ /*
+ * Use negate_clause() to see if we can simplify
+ * away the NOT.
+ */
+ return negate_clause(arg);
+ }
+ default:
+ elog(ERROR, "unrecognized boolop: %d",
+ (int) expr->boolop);
+ break;
+ }
+ break;
+ }
+ case T_SubPlan:
+ case T_AlternativeSubPlan:
+
+ /*
+ * Return a SubPlan unchanged --- too late to do anything with it.
+ *
+ * XXX should we ereport() here instead? Probably this routine
+ * should never be invoked after SubPlan creation.
+ */
+ return node;
+ case T_RelabelType:
+ {
+ RelabelType *relabel = (RelabelType *) node;
+ Node *arg;
+
+ /* Simplify the input ... */
+ arg = eval_const_expressions_mutator((Node *) relabel->arg,
+ context);
+ /* ... and attach a new RelabelType node, if needed */
+ return applyRelabelType(arg,
+ relabel->resulttype,
+ relabel->resulttypmod,
+ relabel->resultcollid,
+ relabel->relabelformat,
+ relabel->location,
+ true);
+ }
+ case T_CoerceViaIO:
+ {
+ CoerceViaIO *expr = (CoerceViaIO *) node;
+ List *args;
+ Oid outfunc;
+ bool outtypisvarlena;
+ Oid infunc;
+ Oid intypioparam;
+ Expr *simple;
+ CoerceViaIO *newexpr;
+
+ /* Make a List so we can use simplify_function */
+ args = list_make1(expr->arg);
+
+ /*
+ * CoerceViaIO represents calling the source type's output
+ * function then the result type's input function. So, try to
+ * simplify it as though it were a stack of two such function
+ * calls. First we need to know what the functions are.
+ *
+ * Note that the coercion functions are assumed not to care
+ * about input collation, so we just pass InvalidOid for that.
+ */
+ getTypeOutputInfo(exprType((Node *) expr->arg),
+ &outfunc, &outtypisvarlena);
+ getTypeInputInfo(expr->resulttype,
+ &infunc, &intypioparam);
+
+ simple = simplify_function(outfunc,
+ CSTRINGOID, -1,
+ InvalidOid,
+ InvalidOid,
+ &args,
+ false,
+ true,
+ true,
+ context);
+ if (simple) /* successfully simplified output fn */
+ {
+ /*
+ * Input functions may want 1 to 3 arguments. We always
+ * supply all three, trusting that nothing downstream will
+ * complain.
+ */
+ args = list_make3(simple,
+ makeConst(OIDOID,
+ -1,
+ InvalidOid,
+ sizeof(Oid),
+ ObjectIdGetDatum(intypioparam),
+ false,
+ true),
+ makeConst(INT4OID,
+ -1,
+ InvalidOid,
+ sizeof(int32),
+ Int32GetDatum(-1),
+ false,
+ true));
+
+ simple = simplify_function(infunc,
+ expr->resulttype, -1,
+ expr->resultcollid,
+ InvalidOid,
+ &args,
+ false,
+ false,
+ true,
+ context);
+ if (simple) /* successfully simplified input fn */
+ return (Node *) simple;
+ }
+
+ /*
+ * The expression cannot be simplified any further, so build
+ * and return a replacement CoerceViaIO node using the
+ * possibly-simplified argument.
+ */
+ newexpr = makeNode(CoerceViaIO);
+ newexpr->arg = (Expr *) linitial(args);
+ newexpr->resulttype = expr->resulttype;
+ newexpr->resultcollid = expr->resultcollid;
+ newexpr->coerceformat = expr->coerceformat;
+ newexpr->location = expr->location;
+ return (Node *) newexpr;
+ }
+ case T_ArrayCoerceExpr:
+ {
+ ArrayCoerceExpr *ac = makeNode(ArrayCoerceExpr);
+ Node *save_case_val;
+
+ /*
+ * Copy the node and const-simplify its arguments. We can't
+ * use ece_generic_processing() here because we need to mess
+ * with case_val only while processing the elemexpr.
+ */
+ memcpy(ac, node, sizeof(ArrayCoerceExpr));
+ ac->arg = (Expr *)
+ eval_const_expressions_mutator((Node *) ac->arg,
+ context);
+
+ /*
+ * Set up for the CaseTestExpr node contained in the elemexpr.
+ * We must prevent it from absorbing any outer CASE value.
+ */
+ save_case_val = context->case_val;
+ context->case_val = NULL;
+
+ ac->elemexpr = (Expr *)
+ eval_const_expressions_mutator((Node *) ac->elemexpr,
+ context);
+
+ context->case_val = save_case_val;
+
+ /*
+ * If constant argument and the per-element expression is
+ * immutable, we can simplify the whole thing to a constant.
+ * Exception: although contain_mutable_functions considers
+ * CoerceToDomain immutable for historical reasons, let's not
+ * do so here; this ensures coercion to an array-over-domain
+ * does not apply the domain's constraints until runtime.
+ */
+ if (ac->arg && IsA(ac->arg, Const) &&
+ ac->elemexpr && !IsA(ac->elemexpr, CoerceToDomain) &&
+ !contain_mutable_functions((Node *) ac->elemexpr))
+ return ece_evaluate_expr(ac);
+
+ return (Node *) ac;
+ }
+ case T_CollateExpr:
+ {
+ /*
+ * We replace CollateExpr with RelabelType, so as to improve
+ * uniformity of expression representation and thus simplify
+ * comparison of expressions. Hence this looks very nearly
+ * the same as the RelabelType case, and we can apply the same
+ * optimizations to avoid unnecessary RelabelTypes.
+ */
+ CollateExpr *collate = (CollateExpr *) node;
+ Node *arg;
+
+ /* Simplify the input ... */
+ arg = eval_const_expressions_mutator((Node *) collate->arg,
+ context);
+ /* ... and attach a new RelabelType node, if needed */
+ return applyRelabelType(arg,
+ exprType(arg),
+ exprTypmod(arg),
+ collate->collOid,
+ COERCE_IMPLICIT_CAST,
+ collate->location,
+ true);
+ }
+ case T_CaseExpr:
+ {
+ /*----------
+ * CASE expressions can be simplified if there are constant
+ * condition clauses:
+ * FALSE (or NULL): drop the alternative
+ * TRUE: drop all remaining alternatives
+ * If the first non-FALSE alternative is a constant TRUE,
+ * we can simplify the entire CASE to that alternative's
+ * expression. If there are no non-FALSE alternatives,
+ * we simplify the entire CASE to the default result (ELSE).
+ *
+ * If we have a simple-form CASE with constant test
+ * expression, we substitute the constant value for contained
+ * CaseTestExpr placeholder nodes, so that we have the
+ * opportunity to reduce constant test conditions. For
+ * example this allows
+ * CASE 0 WHEN 0 THEN 1 ELSE 1/0 END
+ * to reduce to 1 rather than drawing a divide-by-0 error.
+ * Note that when the test expression is constant, we don't
+ * have to include it in the resulting CASE; for example
+ * CASE 0 WHEN x THEN y ELSE z END
+ * is transformed by the parser to
+ * CASE 0 WHEN CaseTestExpr = x THEN y ELSE z END
+ * which we can simplify to
+ * CASE WHEN 0 = x THEN y ELSE z END
+ * It is not necessary for the executor to evaluate the "arg"
+ * expression when executing the CASE, since any contained
+ * CaseTestExprs that might have referred to it will have been
+ * replaced by the constant.
+ *----------
+ */
+ CaseExpr *caseexpr = (CaseExpr *) node;
+ CaseExpr *newcase;
+ Node *save_case_val;
+ Node *newarg;
+ List *newargs;
+ bool const_true_cond;
+ Node *defresult = NULL;
+ ListCell *arg;
+
+ /* Simplify the test expression, if any */
+ newarg = eval_const_expressions_mutator((Node *) caseexpr->arg,
+ context);
+
+ /* Set up for contained CaseTestExpr nodes */
+ save_case_val = context->case_val;
+ if (newarg && IsA(newarg, Const))
+ {
+ context->case_val = newarg;
+ newarg = NULL; /* not needed anymore, see above */
+ }
+ else
+ context->case_val = NULL;
+
+ /* Simplify the WHEN clauses */
+ newargs = NIL;
+ const_true_cond = false;
+ foreach(arg, caseexpr->args)
+ {
+ CaseWhen *oldcasewhen = lfirst_node(CaseWhen, arg);
+ Node *casecond;
+ Node *caseresult;
+
+ /* Simplify this alternative's test condition */
+ casecond = eval_const_expressions_mutator((Node *) oldcasewhen->expr,
+ context);
+
+ /*
+ * If the test condition is constant FALSE (or NULL), then
+ * drop this WHEN clause completely, without processing
+ * the result.
+ */
+ if (casecond && IsA(casecond, Const))
+ {
+ Const *const_input = (Const *) casecond;
+
+ if (const_input->constisnull ||
+ !DatumGetBool(const_input->constvalue))
+ continue; /* drop alternative with FALSE cond */
+ /* Else it's constant TRUE */
+ const_true_cond = true;
+ }
+
+ /* Simplify this alternative's result value */
+ caseresult = eval_const_expressions_mutator((Node *) oldcasewhen->result,
+ context);
+
+ /* If non-constant test condition, emit a new WHEN node */
+ if (!const_true_cond)
+ {
+ CaseWhen *newcasewhen = makeNode(CaseWhen);
+
+ newcasewhen->expr = (Expr *) casecond;
+ newcasewhen->result = (Expr *) caseresult;
+ newcasewhen->location = oldcasewhen->location;
+ newargs = lappend(newargs, newcasewhen);
+ continue;
+ }
+
+ /*
+ * Found a TRUE condition, so none of the remaining
+ * alternatives can be reached. We treat the result as
+ * the default result.
+ */
+ defresult = caseresult;
+ break;
+ }
+
+ /* Simplify the default result, unless we replaced it above */
+ if (!const_true_cond)
+ defresult = eval_const_expressions_mutator((Node *) caseexpr->defresult,
+ context);
+
+ context->case_val = save_case_val;
+
+ /*
+ * If no non-FALSE alternatives, CASE reduces to the default
+ * result
+ */
+ if (newargs == NIL)
+ return defresult;
+ /* Otherwise we need a new CASE node */
+ newcase = makeNode(CaseExpr);
+ newcase->casetype = caseexpr->casetype;
+ newcase->casecollid = caseexpr->casecollid;
+ newcase->arg = (Expr *) newarg;
+ newcase->args = newargs;
+ newcase->defresult = (Expr *) defresult;
+ newcase->location = caseexpr->location;
+ return (Node *) newcase;
+ }
+ case T_CaseTestExpr:
+ {
+ /*
+ * If we know a constant test value for the current CASE
+ * construct, substitute it for the placeholder. Else just
+ * return the placeholder as-is.
+ */
+ if (context->case_val)
+ return copyObject(context->case_val);
+ else
+ return copyObject(node);
+ }
+ case T_SubscriptingRef:
+ case T_ArrayExpr:
+ case T_RowExpr:
+ case T_MinMaxExpr:
+ {
+ /*
+ * Generic handling for node types whose own processing is
+ * known to be immutable, and for which we need no smarts
+ * beyond "simplify if all inputs are constants".
+ *
+ * Treating SubscriptingRef this way assumes that subscripting
+ * fetch and assignment are both immutable. This constrains
+ * type-specific subscripting implementations; maybe we should
+ * relax it someday.
+ *
+ * Treating MinMaxExpr this way amounts to assuming that the
+ * btree comparison function it calls is immutable; see the
+ * reasoning in contain_mutable_functions_walker.
+ */
+
+ /* Copy the node and const-simplify its arguments */
+ node = ece_generic_processing(node);
+ /* If all arguments are Consts, we can fold to a constant */
+ if (ece_all_arguments_const(node))
+ return ece_evaluate_expr(node);
+ return node;
+ }
+ case T_CoalesceExpr:
+ {
+ CoalesceExpr *coalesceexpr = (CoalesceExpr *) node;
+ CoalesceExpr *newcoalesce;
+ List *newargs;
+ ListCell *arg;
+
+ newargs = NIL;
+ foreach(arg, coalesceexpr->args)
+ {
+ Node *e;
+
+ e = eval_const_expressions_mutator((Node *) lfirst(arg),
+ context);
+
+ /*
+ * We can remove null constants from the list. For a
+ * non-null constant, if it has not been preceded by any
+ * other non-null-constant expressions then it is the
+ * result. Otherwise, it's the next argument, but we can
+ * drop following arguments since they will never be
+ * reached.
+ */
+ if (IsA(e, Const))
+ {
+ if (((Const *) e)->constisnull)
+ continue; /* drop null constant */
+ if (newargs == NIL)
+ return e; /* first expr */
+ newargs = lappend(newargs, e);
+ break;
+ }
+ newargs = lappend(newargs, e);
+ }
+
+ /*
+ * If all the arguments were constant null, the result is just
+ * null
+ */
+ if (newargs == NIL)
+ return (Node *) makeNullConst(coalesceexpr->coalescetype,
+ -1,
+ coalesceexpr->coalescecollid);
+
+ newcoalesce = makeNode(CoalesceExpr);
+ newcoalesce->coalescetype = coalesceexpr->coalescetype;
+ newcoalesce->coalescecollid = coalesceexpr->coalescecollid;
+ newcoalesce->args = newargs;
+ newcoalesce->location = coalesceexpr->location;
+ return (Node *) newcoalesce;
+ }
+ case T_SQLValueFunction:
+ {
+ /*
+ * All variants of SQLValueFunction are stable, so if we are
+ * estimating the expression's value, we should evaluate the
+ * current function value. Otherwise just copy.
+ */
+ SQLValueFunction *svf = (SQLValueFunction *) node;
+
+ if (context->estimate)
+ return (Node *) evaluate_expr((Expr *) svf,
+ svf->type,
+ svf->typmod,
+ InvalidOid);
+ else
+ return copyObject((Node *) svf);
+ }
+ case T_FieldSelect:
+ {
+ /*
+ * We can optimize field selection from a whole-row Var into a
+ * simple Var. (This case won't be generated directly by the
+ * parser, because ParseComplexProjection short-circuits it.
+ * But it can arise while simplifying functions.) Also, we
+ * can optimize field selection from a RowExpr construct, or
+ * of course from a constant.
+ *
+ * However, replacing a whole-row Var in this way has a
+ * pitfall: if we've already built the rel targetlist for the
+ * source relation, then the whole-row Var is scheduled to be
+ * produced by the relation scan, but the simple Var probably
+ * isn't, which will lead to a failure in setrefs.c. This is
+ * not a problem when handling simple single-level queries, in
+ * which expression simplification always happens first. It
+ * is a risk for lateral references from subqueries, though.
+ * To avoid such failures, don't optimize uplevel references.
+ *
+ * We must also check that the declared type of the field is
+ * still the same as when the FieldSelect was created --- this
+ * can change if someone did ALTER COLUMN TYPE on the rowtype.
+ * If it isn't, we skip the optimization; the case will
+ * probably fail at runtime, but that's not our problem here.
+ */
+ FieldSelect *fselect = (FieldSelect *) node;
+ FieldSelect *newfselect;
+ Node *arg;
+
+ arg = eval_const_expressions_mutator((Node *) fselect->arg,
+ context);
+ if (arg && IsA(arg, Var) &&
+ ((Var *) arg)->varattno == InvalidAttrNumber &&
+ ((Var *) arg)->varlevelsup == 0)
+ {
+ if (rowtype_field_matches(((Var *) arg)->vartype,
+ fselect->fieldnum,
+ fselect->resulttype,
+ fselect->resulttypmod,
+ fselect->resultcollid))
+ return (Node *) makeVar(((Var *) arg)->varno,
+ fselect->fieldnum,
+ fselect->resulttype,
+ fselect->resulttypmod,
+ fselect->resultcollid,
+ ((Var *) arg)->varlevelsup);
+ }
+ if (arg && IsA(arg, RowExpr))
+ {
+ RowExpr *rowexpr = (RowExpr *) arg;
+
+ if (fselect->fieldnum > 0 &&
+ fselect->fieldnum <= list_length(rowexpr->args))
+ {
+ Node *fld = (Node *) list_nth(rowexpr->args,
+ fselect->fieldnum - 1);
+
+ if (rowtype_field_matches(rowexpr->row_typeid,
+ fselect->fieldnum,
+ fselect->resulttype,
+ fselect->resulttypmod,
+ fselect->resultcollid) &&
+ fselect->resulttype == exprType(fld) &&
+ fselect->resulttypmod == exprTypmod(fld) &&
+ fselect->resultcollid == exprCollation(fld))
+ return fld;
+ }
+ }
+ newfselect = makeNode(FieldSelect);
+ newfselect->arg = (Expr *) arg;
+ newfselect->fieldnum = fselect->fieldnum;
+ newfselect->resulttype = fselect->resulttype;
+ newfselect->resulttypmod = fselect->resulttypmod;
+ newfselect->resultcollid = fselect->resultcollid;
+ if (arg && IsA(arg, Const))
+ {
+ Const *con = (Const *) arg;
+
+ if (rowtype_field_matches(con->consttype,
+ newfselect->fieldnum,
+ newfselect->resulttype,
+ newfselect->resulttypmod,
+ newfselect->resultcollid))
+ return ece_evaluate_expr(newfselect);
+ }
+ return (Node *) newfselect;
+ }
+ case T_NullTest:
+ {
+ NullTest *ntest = (NullTest *) node;
+ NullTest *newntest;
+ Node *arg;
+
+ arg = eval_const_expressions_mutator((Node *) ntest->arg,
+ context);
+ if (ntest->argisrow && arg && IsA(arg, RowExpr))
+ {
+ /*
+ * We break ROW(...) IS [NOT] NULL into separate tests on
+ * its component fields. This form is usually more
+ * efficient to evaluate, as well as being more amenable
+ * to optimization.
+ */
+ RowExpr *rarg = (RowExpr *) arg;
+ List *newargs = NIL;
+ ListCell *l;
+
+ foreach(l, rarg->args)
+ {
+ Node *relem = (Node *) lfirst(l);
+
+ /*
+ * A constant field refutes the whole NullTest if it's
+ * of the wrong nullness; else we can discard it.
+ */
+ if (relem && IsA(relem, Const))
+ {
+ Const *carg = (Const *) relem;
+
+ if (carg->constisnull ?
+ (ntest->nulltesttype == IS_NOT_NULL) :
+ (ntest->nulltesttype == IS_NULL))
+ return makeBoolConst(false, false);
+ continue;
+ }
+
+ /*
+ * Else, make a scalar (argisrow == false) NullTest
+ * for this field. Scalar semantics are required
+ * because IS [NOT] NULL doesn't recurse; see comments
+ * in ExecEvalRowNullInt().
+ */
+ newntest = makeNode(NullTest);
+ newntest->arg = (Expr *) relem;
+ newntest->nulltesttype = ntest->nulltesttype;
+ newntest->argisrow = false;
+ newntest->location = ntest->location;
+ newargs = lappend(newargs, newntest);
+ }
+ /* If all the inputs were constants, result is TRUE */
+ if (newargs == NIL)
+ return makeBoolConst(true, false);
+ /* If only one nonconst input, it's the result */
+ if (list_length(newargs) == 1)
+ return (Node *) linitial(newargs);
+ /* Else we need an AND node */
+ return (Node *) make_andclause(newargs);
+ }
+ if (!ntest->argisrow && arg && IsA(arg, Const))
+ {
+ Const *carg = (Const *) arg;
+ bool result;
+
+ switch (ntest->nulltesttype)
+ {
+ case IS_NULL:
+ result = carg->constisnull;
+ break;
+ case IS_NOT_NULL:
+ result = !carg->constisnull;
+ break;
+ default:
+ elog(ERROR, "unrecognized nulltesttype: %d",
+ (int) ntest->nulltesttype);
+ result = false; /* keep compiler quiet */
+ break;
+ }
+
+ return makeBoolConst(result, false);
+ }
+
+ newntest = makeNode(NullTest);
+ newntest->arg = (Expr *) arg;
+ newntest->nulltesttype = ntest->nulltesttype;
+ newntest->argisrow = ntest->argisrow;
+ newntest->location = ntest->location;
+ return (Node *) newntest;
+ }
+ case T_BooleanTest:
+ {
+ /*
+ * This case could be folded into the generic handling used
+ * for ArrayExpr etc. But because the simplification logic is
+ * so trivial, applying evaluate_expr() to perform it would be
+ * a heavy overhead. BooleanTest is probably common enough to
+ * justify keeping this bespoke implementation.
+ */
+ BooleanTest *btest = (BooleanTest *) node;
+ BooleanTest *newbtest;
+ Node *arg;
+
+ arg = eval_const_expressions_mutator((Node *) btest->arg,
+ context);
+ if (arg && IsA(arg, Const))
+ {
+ Const *carg = (Const *) arg;
+ bool result;
+
+ switch (btest->booltesttype)
+ {
+ case IS_TRUE:
+ result = (!carg->constisnull &&
+ DatumGetBool(carg->constvalue));
+ break;
+ case IS_NOT_TRUE:
+ result = (carg->constisnull ||
+ !DatumGetBool(carg->constvalue));
+ break;
+ case IS_FALSE:
+ result = (!carg->constisnull &&
+ !DatumGetBool(carg->constvalue));
+ break;
+ case IS_NOT_FALSE:
+ result = (carg->constisnull ||
+ DatumGetBool(carg->constvalue));
+ break;
+ case IS_UNKNOWN:
+ result = carg->constisnull;
+ break;
+ case IS_NOT_UNKNOWN:
+ result = !carg->constisnull;
+ break;
+ default:
+ elog(ERROR, "unrecognized booltesttype: %d",
+ (int) btest->booltesttype);
+ result = false; /* keep compiler quiet */
+ break;
+ }
+
+ return makeBoolConst(result, false);
+ }
+
+ newbtest = makeNode(BooleanTest);
+ newbtest->arg = (Expr *) arg;
+ newbtest->booltesttype = btest->booltesttype;
+ newbtest->location = btest->location;
+ return (Node *) newbtest;
+ }
+ case T_CoerceToDomain:
+ {
+ /*
+ * If the domain currently has no constraints, we replace the
+ * CoerceToDomain node with a simple RelabelType, which is
+ * both far faster to execute and more amenable to later
+ * optimization. We must then mark the plan as needing to be
+ * rebuilt if the domain's constraints change.
+ *
+ * Also, in estimation mode, always replace CoerceToDomain
+ * nodes, effectively assuming that the coercion will succeed.
+ */
+ CoerceToDomain *cdomain = (CoerceToDomain *) node;
+ CoerceToDomain *newcdomain;
+ Node *arg;
+
+ arg = eval_const_expressions_mutator((Node *) cdomain->arg,
+ context);
+ if (context->estimate ||
+ !DomainHasConstraints(cdomain->resulttype))
+ {
+ /* Record dependency, if this isn't estimation mode */
+ if (context->root && !context->estimate)
+ record_plan_type_dependency(context->root,
+ cdomain->resulttype);
+
+ /* Generate RelabelType to substitute for CoerceToDomain */
+ return applyRelabelType(arg,
+ cdomain->resulttype,
+ cdomain->resulttypmod,
+ cdomain->resultcollid,
+ cdomain->coercionformat,
+ cdomain->location,
+ true);
+ }
+
+ newcdomain = makeNode(CoerceToDomain);
+ newcdomain->arg = (Expr *) arg;
+ newcdomain->resulttype = cdomain->resulttype;
+ newcdomain->resulttypmod = cdomain->resulttypmod;
+ newcdomain->resultcollid = cdomain->resultcollid;
+ newcdomain->coercionformat = cdomain->coercionformat;
+ newcdomain->location = cdomain->location;
+ return (Node *) newcdomain;
+ }
+ case T_PlaceHolderVar:
+
+ /*
+ * In estimation mode, just strip the PlaceHolderVar node
+ * altogether; this amounts to estimating that the contained value
+ * won't be forced to null by an outer join. In regular mode we
+ * just use the default behavior (ie, simplify the expression but
+ * leave the PlaceHolderVar node intact).
+ */
+ if (context->estimate)
+ {
+ PlaceHolderVar *phv = (PlaceHolderVar *) node;
+
+ return eval_const_expressions_mutator((Node *) phv->phexpr,
+ context);
+ }
+ break;
+ case T_ConvertRowtypeExpr:
+ {
+ ConvertRowtypeExpr *cre = castNode(ConvertRowtypeExpr, node);
+ Node *arg;
+ ConvertRowtypeExpr *newcre;
+
+ arg = eval_const_expressions_mutator((Node *) cre->arg,
+ context);
+
+ newcre = makeNode(ConvertRowtypeExpr);
+ newcre->resulttype = cre->resulttype;
+ newcre->convertformat = cre->convertformat;
+ newcre->location = cre->location;
+
+ /*
+ * In case of a nested ConvertRowtypeExpr, we can convert the
+ * leaf row directly to the topmost row format without any
+ * intermediate conversions. (This works because
+ * ConvertRowtypeExpr is used only for child->parent
+ * conversion in inheritance trees, which works by exact match
+ * of column name, and a column absent in an intermediate
+ * result can't be present in the final result.)
+ *
+ * No need to check more than one level deep, because the
+ * above recursion will have flattened anything else.
+ */
+ if (arg != NULL && IsA(arg, ConvertRowtypeExpr))
+ {
+ ConvertRowtypeExpr *argcre = (ConvertRowtypeExpr *) arg;
+
+ arg = (Node *) argcre->arg;
+
+ /*
+ * Make sure an outer implicit conversion can't hide an
+ * inner explicit one.
+ */
+ if (newcre->convertformat == COERCE_IMPLICIT_CAST)
+ newcre->convertformat = argcre->convertformat;
+ }
+
+ newcre->arg = (Expr *) arg;
+
+ if (arg != NULL && IsA(arg, Const))
+ return ece_evaluate_expr((Node *) newcre);
+ return (Node *) newcre;
+ }
+ default:
+ break;
+ }
+
+ /*
+ * For any node type not handled above, copy the node unchanged but
+ * const-simplify its subexpressions. This is the correct thing for node
+ * types whose behavior might change between planning and execution, such
+ * as CurrentOfExpr. It's also a safe default for new node types not
+ * known to this routine.
+ */
+ return ece_generic_processing(node);
+}
+
+/*
+ * Subroutine for eval_const_expressions: check for non-Const nodes.
+ *
+ * We can abort recursion immediately on finding a non-Const node. This is
+ * critical for performance, else eval_const_expressions_mutator would take
+ * O(N^2) time on non-simplifiable trees. However, we do need to descend
+ * into List nodes since expression_tree_walker sometimes invokes the walker
+ * function directly on List subtrees.
+ */
+static bool
+contain_non_const_walker(Node *node, void *context)
+{
+ if (node == NULL)
+ return false;
+ if (IsA(node, Const))
+ return false;
+ if (IsA(node, List))
+ return expression_tree_walker(node, contain_non_const_walker, context);
+ /* Otherwise, abort the tree traversal and return true */
+ return true;
+}
+
+/*
+ * Subroutine for eval_const_expressions: check if a function is OK to evaluate
+ */
+static bool
+ece_function_is_safe(Oid funcid, eval_const_expressions_context *context)
+{
+ char provolatile = func_volatile(funcid);
+
+ /*
+ * Ordinarily we are only allowed to simplify immutable functions. But for
+ * purposes of estimation, we consider it okay to simplify functions that
+ * are merely stable; the risk that the result might change from planning
+ * time to execution time is worth taking in preference to not being able
+ * to estimate the value at all.
+ */
+ if (provolatile == PROVOLATILE_IMMUTABLE)
+ return true;
+ if (context->estimate && provolatile == PROVOLATILE_STABLE)
+ return true;
+ return false;
+}
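+
+/*
+ * For illustration: the function behind integer "+" is immutable and thus
+ * always considered safe here; now() is merely stable, so it is safe only
+ * when context->estimate is true; random() is volatile and never safe.
+ */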
+
+/*
+ * Subroutine for eval_const_expressions: process arguments of an OR clause
+ *
+ * This includes flattening of nested ORs as well as recursion to
+ * eval_const_expressions to simplify the OR arguments.
+ *
+ * After simplification, OR arguments are handled as follows:
+ * non constant: keep
+ * FALSE: drop (does not affect result)
+ * TRUE: force result to TRUE
+ * NULL: keep only one
+ * We must keep one NULL input because OR expressions evaluate to NULL when no
+ * input is TRUE and at least one is NULL.  We don't actually include the NULL
+ * here; that is left to the caller.
+ *
+ * The output arguments *haveNull and *forceTrue must be initialized false
+ * by the caller. They will be set true if a NULL constant or TRUE constant,
+ * respectively, is detected anywhere in the argument list.
+ */
+static List *
+simplify_or_arguments(List *args,
+ eval_const_expressions_context *context,
+ bool *haveNull, bool *forceTrue)
+{
+ List *newargs = NIL;
+ List *unprocessed_args;
+
+ /*
+ * We want to ensure that any OR immediately beneath another OR gets
+ * flattened into a single OR-list, so as to simplify later reasoning.
+ *
+ * To avoid stack overflow from recursion of eval_const_expressions, we
+ * resort to some tenseness here: we keep a list of not-yet-processed
+ * inputs, and handle flattening of nested ORs by prepending to the to-do
+ * list instead of recursing. Now that the parser generates N-argument
+ * ORs from simple lists, this complexity is probably less necessary than
+ * it once was, but we might as well keep the logic.
+ */
+ unprocessed_args = list_copy(args);
+ while (unprocessed_args)
+ {
+ Node *arg = (Node *) linitial(unprocessed_args);
+
+ unprocessed_args = list_delete_first(unprocessed_args);
+
+ /* flatten nested ORs as per above comment */
+ if (is_orclause(arg))
+ {
+ List *subargs = ((BoolExpr *) arg)->args;
+ List *oldlist = unprocessed_args;
+
+ unprocessed_args = list_concat_copy(subargs, unprocessed_args);
+ /* perhaps-overly-tense code to avoid leaking old lists */
+ list_free(oldlist);
+ continue;
+ }
+
+ /* If it's not an OR, simplify it */
+ arg = eval_const_expressions_mutator(arg, context);
+
+ /*
+ * It is unlikely but not impossible for simplification of a non-OR
+ * clause to produce an OR. Recheck, but don't be too tense about it
+ * since it's not a mainstream case. In particular we don't worry
+ * about const-simplifying the input twice, nor about list leakage.
+ */
+ if (is_orclause(arg))
+ {
+ List *subargs = ((BoolExpr *) arg)->args;
+
+ unprocessed_args = list_concat_copy(subargs, unprocessed_args);
+ continue;
+ }
+
+ /*
+ * OK, we have a const-simplified non-OR argument. Process it per
+ * comments above.
+ */
+ if (IsA(arg, Const))
+ {
+ Const *const_input = (Const *) arg;
+
+ if (const_input->constisnull)
+ *haveNull = true;
+ else if (DatumGetBool(const_input->constvalue))
+ {
+ *forceTrue = true;
+
+ /*
+ * Once we detect a TRUE result we can just exit the loop
+ * immediately. However, if we ever add a notion of
+ * non-removable functions, we'd need to keep scanning.
+ */
+ return NIL;
+ }
+ /* otherwise, we can drop the constant-false input */
+ continue;
+ }
+
+ /* else emit the simplified arg into the result list */
+ newargs = lappend(newargs, arg);
+ }
+
+ return newargs;
+}
+
+/*
+ * Subroutine for eval_const_expressions: process arguments of an AND clause
+ *
+ * This includes flattening of nested ANDs as well as recursion to
+ * eval_const_expressions to simplify the AND arguments.
+ *
+ * After simplification, AND arguments are handled as follows:
+ * non constant: keep
+ * TRUE: drop (does not affect result)
+ * FALSE: force result to FALSE
+ * NULL: keep only one
+ * We must keep one NULL input because AND expressions evaluate to NULL when
+ * no input is FALSE and at least one is NULL. We don't actually include the
+ * NULL here; that's supposed to be done by the caller.
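+ *
+ * For example, "a AND TRUE AND (b AND c)" yields the flattened argument list
+ * {a, b, c}, while "a AND FALSE" sets *forceFalse and returns NIL.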
+ *
+ * The output arguments *haveNull and *forceFalse must be initialized false
+ * by the caller. They will be set true if a null constant or false constant,
+ * respectively, is detected anywhere in the argument list.
+ */
+static List *
+simplify_and_arguments(List *args,
+ eval_const_expressions_context *context,
+ bool *haveNull, bool *forceFalse)
+{
+ List *newargs = NIL;
+ List *unprocessed_args;
+
+ /* See comments in simplify_or_arguments */
+ unprocessed_args = list_copy(args);
+ while (unprocessed_args)
+ {
+ Node *arg = (Node *) linitial(unprocessed_args);
+
+ unprocessed_args = list_delete_first(unprocessed_args);
+
+ /* flatten nested ANDs as per above comment */
+ if (is_andclause(arg))
+ {
+ List *subargs = ((BoolExpr *) arg)->args;
+ List *oldlist = unprocessed_args;
+
+ unprocessed_args = list_concat_copy(subargs, unprocessed_args);
+ /* perhaps-overly-tense code to avoid leaking old lists */
+ list_free(oldlist);
+ continue;
+ }
+
+ /* If it's not an AND, simplify it */
+ arg = eval_const_expressions_mutator(arg, context);
+
+ /*
+ * It is unlikely but not impossible for simplification of a non-AND
+ * clause to produce an AND. Recheck, but don't be too tense about it
+ * since it's not a mainstream case. In particular we don't worry
+ * about const-simplifying the input twice, nor about list leakage.
+ */
+ if (is_andclause(arg))
+ {
+ List *subargs = ((BoolExpr *) arg)->args;
+
+ unprocessed_args = list_concat_copy(subargs, unprocessed_args);
+ continue;
+ }
+
+ /*
+ * OK, we have a const-simplified non-AND argument. Process it per
+ * comments above.
+ */
+ if (IsA(arg, Const))
+ {
+ Const *const_input = (Const *) arg;
+
+ if (const_input->constisnull)
+ *haveNull = true;
+ else if (!DatumGetBool(const_input->constvalue))
+ {
+ *forceFalse = true;
+
+ /*
+ * Once we detect a FALSE result we can just exit the loop
+ * immediately. However, if we ever add a notion of
+ * non-removable functions, we'd need to keep scanning.
+ */
+ return NIL;
+ }
+ /* otherwise, we can drop the constant-true input */
+ continue;
+ }
+
+ /* else emit the simplified arg into the result list */
+ newargs = lappend(newargs, arg);
+ }
+
+ return newargs;
+}
+
+/*
+ * Subroutine for eval_const_expressions: try to simplify boolean equality
+ * or inequality condition
+ *
+ * Inputs are the operator OID and the simplified arguments to the operator.
+ * Returns a simplified expression if successful, or NULL if cannot
+ * simplify the expression.
+ *
+ * The idea here is to reduce "x = true" to "x" and "x = false" to "NOT x",
+ * or similarly "x <> true" to "NOT x" and "x <> false" to "x".
+ * This is only marginally useful in itself, but doing it in constant folding
+ * ensures that we will recognize these forms as being equivalent in, for
+ * example, partial index matching.
+ *
+ * We come here only if simplify_function has failed; therefore we cannot
+ * see two constant inputs, nor a constant-NULL input.
+ */
+static Node *
+simplify_boolean_equality(Oid opno, List *args)
+{
+ Node *leftop;
+ Node *rightop;
+
+ Assert(list_length(args) == 2);
+ leftop = linitial(args);
+ rightop = lsecond(args);
+ if (leftop && IsA(leftop, Const))
+ {
+ Assert(!((Const *) leftop)->constisnull);
+ if (opno == BooleanEqualOperator)
+ {
+ if (DatumGetBool(((Const *) leftop)->constvalue))
+ return rightop; /* true = foo */
+ else
+ return negate_clause(rightop); /* false = foo */
+ }
+ else
+ {
+ if (DatumGetBool(((Const *) leftop)->constvalue))
+ return negate_clause(rightop); /* true <> foo */
+ else
+ return rightop; /* false <> foo */
+ }
+ }
+ if (rightop && IsA(rightop, Const))
+ {
+ Assert(!((Const *) rightop)->constisnull);
+ if (opno == BooleanEqualOperator)
+ {
+ if (DatumGetBool(((Const *) rightop)->constvalue))
+ return leftop; /* foo = true */
+ else
+ return negate_clause(leftop); /* foo = false */
+ }
+ else
+ {
+ if (DatumGetBool(((Const *) rightop)->constvalue))
+ return negate_clause(leftop); /* foo <> true */
+ else
+ return leftop; /* foo <> false */
+ }
+ }
+ return NULL;
+}
+
+/*
+ * Subroutine for eval_const_expressions: try to simplify a function call
+ * (which might originally have been an operator; we don't care)
+ *
+ * Inputs are the function OID, actual result type OID (which is needed for
+ * polymorphic functions), result typmod, result collation, the input
+ * collation to use for the function, the original argument list (not
+ * const-simplified yet, unless process_args is false), and some flags;
+ * also the context data for eval_const_expressions.
+ *
+ * Returns a simplified expression if successful, or NULL if cannot
+ * simplify the function call.
+ *
+ * This function is also responsible for converting named-notation argument
+ * lists into positional notation and/or adding any needed default argument
+ * expressions; this is a bit grotty, but it avoids extra fetches of the
+ * function's pg_proc tuple. For this reason, the args list is passed by
+ * reference. Conversion and const-simplification of the args list
+ * will be done even if simplification of the function call itself is not
+ * possible.
+ */
+static Expr *
+simplify_function(Oid funcid, Oid result_type, int32 result_typmod,
+ Oid result_collid, Oid input_collid, List **args_p,
+ bool funcvariadic, bool process_args, bool allow_non_const,
+ eval_const_expressions_context *context)
+{
+ List *args = *args_p;
+ HeapTuple func_tuple;
+ Form_pg_proc func_form;
+ Expr *newexpr;
+
+ /*
+ * We have three strategies for simplification: execute the function to
+ * deliver a constant result, use a transform function to generate a
+ * substitute node tree, or expand in-line the body of the function
+ * definition (which only works for simple SQL-language functions, but
+ * that is a common case). Each case needs access to the function's
+ * pg_proc tuple, so fetch it just once.
+ *
+ * Note: the allow_non_const flag suppresses both the second and third
+ * strategies; so if !allow_non_const, simplify_function can only return a
+ * Const or NULL. Argument-list rewriting happens anyway, though.
+ */
+ func_tuple = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid));
+ if (!HeapTupleIsValid(func_tuple))
+ elog(ERROR, "cache lookup failed for function %u", funcid);
+ func_form = (Form_pg_proc) GETSTRUCT(func_tuple);
+
+ /*
+ * Process the function arguments, unless the caller did it already.
+ *
+ * Here we must deal with named or defaulted arguments, and then
+ * recursively apply eval_const_expressions to the whole argument list.
+ */
+ if (process_args)
+ {
+ args = expand_function_arguments(args, false, result_type, func_tuple);
+ args = (List *) expression_tree_mutator((Node *) args,
+ eval_const_expressions_mutator,
+ (void *) context);
+ /* Argument processing done, give it back to the caller */
+ *args_p = args;
+ }
+
+ /* Now attempt simplification of the function call proper. */
+
+ newexpr = evaluate_function(funcid, result_type, result_typmod,
+ result_collid, input_collid,
+ args, funcvariadic,
+ func_tuple, context);
+
+ if (!newexpr && allow_non_const && OidIsValid(func_form->prosupport))
+ {
+ /*
+ * Build a SupportRequestSimplify node to pass to the support
+ * function, pointing to a dummy FuncExpr node containing the
+ * simplified arg list. We use this approach to present a uniform
+ * interface to the support function regardless of how the target
+ * function is actually being invoked.
+ */
+ SupportRequestSimplify req;
+ FuncExpr fexpr;
+
+ fexpr.xpr.type = T_FuncExpr;
+ fexpr.funcid = funcid;
+ fexpr.funcresulttype = result_type;
+ fexpr.funcretset = func_form->proretset;
+ fexpr.funcvariadic = funcvariadic;
+ fexpr.funcformat = COERCE_EXPLICIT_CALL;
+ fexpr.funccollid = result_collid;
+ fexpr.inputcollid = input_collid;
+ fexpr.args = args;
+ fexpr.location = -1;
+
+ req.type = T_SupportRequestSimplify;
+ req.root = context->root;
+ req.fcall = &fexpr;
+
+ newexpr = (Expr *)
+ DatumGetPointer(OidFunctionCall1(func_form->prosupport,
+ PointerGetDatum(&req)));
+
+ /* catch a possible API misunderstanding */
+ Assert(newexpr != (Expr *) &fexpr);
+ }
+
+ if (!newexpr && allow_non_const)
+ newexpr = inline_function(funcid, result_type, result_collid,
+ input_collid, args, funcvariadic,
+ func_tuple, context);
+
+ ReleaseSysCache(func_tuple);
+
+ return newexpr;
+}
+
+/*
+ * expand_function_arguments: convert named-notation args to positional args
+ * and/or insert default args, as needed
+ *
+ * Returns a possibly-transformed version of the args list.
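+ *
+ * For example, given a function declared as f(a int, b int DEFAULT 0,
+ * c int DEFAULT 1), a call written as f(1, c => 2) is converted to the
+ * positional argument list (1, 0, 2).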
+ *
+ * If include_out_arguments is true, then the args list and the result
+ * include OUT arguments.
+ *
+ * The expected result type of the call must be given, for sanity-checking
+ * purposes. Also, we ask the caller to provide the function's actual
+ * pg_proc tuple, not just its OID.
+ *
+ * If we need to change anything, the input argument list is copied, not
+ * modified.
+ *
+ * Note: this gets applied to operator argument lists too, even though the
+ * cases it handles should never occur there. This should be OK since it
+ * will fall through very quickly if there's nothing to do.
+ */
+List *
+expand_function_arguments(List *args, bool include_out_arguments,
+ Oid result_type, HeapTuple func_tuple)
+{
+ Form_pg_proc funcform = (Form_pg_proc) GETSTRUCT(func_tuple);
+ Oid *proargtypes = funcform->proargtypes.values;
+ int pronargs = funcform->pronargs;
+ bool has_named_args = false;
+ ListCell *lc;
+
+ /*
+ * If we are asked to match to OUT arguments, then use the proallargtypes
+ * array (which includes those); otherwise use proargtypes (which
+ * doesn't). Of course, if proallargtypes is null, we always use
+ * proargtypes. (Fetching proallargtypes is annoyingly expensive
+ * considering that we may have nothing to do here, but fortunately the
+ * common case is include_out_arguments == false.)
+ */
+ if (include_out_arguments)
+ {
+ Datum proallargtypes;
+ bool isNull;
+
+ proallargtypes = SysCacheGetAttr(PROCOID, func_tuple,
+ Anum_pg_proc_proallargtypes,
+ &isNull);
+ if (!isNull)
+ {
+ ArrayType *arr = DatumGetArrayTypeP(proallargtypes);
+
+ pronargs = ARR_DIMS(arr)[0];
+ if (ARR_NDIM(arr) != 1 ||
+ pronargs < 0 ||
+ ARR_HASNULL(arr) ||
+ ARR_ELEMTYPE(arr) != OIDOID)
+ elog(ERROR, "proallargtypes is not a 1-D Oid array or it contains nulls");
+ Assert(pronargs >= funcform->pronargs);
+ proargtypes = (Oid *) ARR_DATA_PTR(arr);
+ }
+ }
+
+ /* Do we have any named arguments? */
+ foreach(lc, args)
+ {
+ Node *arg = (Node *) lfirst(lc);
+
+ if (IsA(arg, NamedArgExpr))
+ {
+ has_named_args = true;
+ break;
+ }
+ }
+
+ /* If so, we must apply reorder_function_arguments */
+ if (has_named_args)
+ {
+ args = reorder_function_arguments(args, pronargs, func_tuple);
+ /* Recheck argument types and add casts if needed */
+ recheck_cast_function_args(args, result_type,
+ proargtypes, pronargs,
+ func_tuple);
+ }
+ else if (list_length(args) < pronargs)
+ {
+ /* No named args, but we seem to be short some defaults */
+ args = add_function_defaults(args, pronargs, func_tuple);
+ /* Recheck argument types and add casts if needed */
+ recheck_cast_function_args(args, result_type,
+ proargtypes, pronargs,
+ func_tuple);
+ }
+
+ return args;
+}
+
+/*
+ * reorder_function_arguments: convert named-notation args to positional args
+ *
+ * This function also inserts default argument values as needed, since it's
+ * impossible to form a truly valid positional call without that.
+ */
+static List *
+reorder_function_arguments(List *args, int pronargs, HeapTuple func_tuple)
+{
+ Form_pg_proc funcform = (Form_pg_proc) GETSTRUCT(func_tuple);
+ int nargsprovided = list_length(args);
+ Node *argarray[FUNC_MAX_ARGS];
+ ListCell *lc;
+ int i;
+
+ Assert(nargsprovided <= pronargs);
+ if (pronargs < 0 || pronargs > FUNC_MAX_ARGS)
+ elog(ERROR, "too many function arguments");
+ memset(argarray, 0, pronargs * sizeof(Node *));
+
+ /* Deconstruct the argument list into an array indexed by argnumber */
+ i = 0;
+ foreach(lc, args)
+ {
+ Node *arg = (Node *) lfirst(lc);
+
+ if (!IsA(arg, NamedArgExpr))
+ {
+ /* positional argument, assumed to precede all named args */
+ Assert(argarray[i] == NULL);
+ argarray[i++] = arg;
+ }
+ else
+ {
+ NamedArgExpr *na = (NamedArgExpr *) arg;
+
+ Assert(na->argnumber >= 0 && na->argnumber < pronargs);
+ Assert(argarray[na->argnumber] == NULL);
+ argarray[na->argnumber] = (Node *) na->arg;
+ }
+ }
+
+ /*
+ * Fetch default expressions, if needed, and insert into array at proper
+ * locations (they aren't necessarily consecutive or all used)
+ */
+ if (nargsprovided < pronargs)
+ {
+ List *defaults = fetch_function_defaults(func_tuple);
+
+ i = pronargs - funcform->pronargdefaults;
+ foreach(lc, defaults)
+ {
+ if (argarray[i] == NULL)
+ argarray[i] = (Node *) lfirst(lc);
+ i++;
+ }
+ }
+
+ /* Now reconstruct the args list in proper order */
+ args = NIL;
+ for (i = 0; i < pronargs; i++)
+ {
+ Assert(argarray[i] != NULL);
+ args = lappend(args, argarray[i]);
+ }
+
+ return args;
+}
+
+/*
+ * add_function_defaults: add missing function arguments from its defaults
+ *
+ * This is used only when the argument list was positional to begin with,
+ * and so we know we just need to add defaults at the end.
+ */
+static List *
+add_function_defaults(List *args, int pronargs, HeapTuple func_tuple)
+{
+ int nargsprovided = list_length(args);
+ List *defaults;
+ int ndelete;
+
+ /* Get all the default expressions from the pg_proc tuple */
+ defaults = fetch_function_defaults(func_tuple);
+
+ /* Delete any unused defaults from the list */
+ ndelete = nargsprovided + list_length(defaults) - pronargs;
+ if (ndelete < 0)
+ elog(ERROR, "not enough default arguments");
+ if (ndelete > 0)
+ defaults = list_delete_first_n(defaults, ndelete);
+
+ /* And form the combined argument list, not modifying the input list */
+ return list_concat_copy(args, defaults);
+}
+
+/*
+ * fetch_function_defaults: get function's default arguments as expression list
+ */
+static List *
+fetch_function_defaults(HeapTuple func_tuple)
+{
+ List *defaults;
+ Datum proargdefaults;
+ bool isnull;
+ char *str;
+
+ /* The error cases here shouldn't happen, but check anyway */
+ proargdefaults = SysCacheGetAttr(PROCOID, func_tuple,
+ Anum_pg_proc_proargdefaults,
+ &isnull);
+ if (isnull)
+ elog(ERROR, "not enough default arguments");
+ str = TextDatumGetCString(proargdefaults);
+ defaults = castNode(List, stringToNode(str));
+ pfree(str);
+ return defaults;
+}
+
+/*
+ * recheck_cast_function_args: recheck function args and typecast as needed
+ * after adding defaults.
+ *
+ * It is possible for some of the defaulted arguments to be polymorphic;
+ * therefore we can't assume that the default expressions have the correct
+ * data types already. We have to re-resolve polymorphics and do coercion
+ * just like the parser did.
+ *
+ * This should be a no-op if there are no polymorphic arguments,
+ * but we do it anyway to be sure.
+ *
+ * Note: if any casts are needed, the args list is modified in-place;
+ * caller should have already copied the list structure.
+ */
+static void
+recheck_cast_function_args(List *args, Oid result_type,
+ Oid *proargtypes, int pronargs,
+ HeapTuple func_tuple)
+{
+ Form_pg_proc funcform = (Form_pg_proc) GETSTRUCT(func_tuple);
+ int nargs;
+ Oid actual_arg_types[FUNC_MAX_ARGS];
+ Oid declared_arg_types[FUNC_MAX_ARGS];
+ Oid rettype;
+ ListCell *lc;
+
+ if (list_length(args) > FUNC_MAX_ARGS)
+ elog(ERROR, "too many function arguments");
+ nargs = 0;
+ foreach(lc, args)
+ {
+ actual_arg_types[nargs++] = exprType((Node *) lfirst(lc));
+ }
+ Assert(nargs == pronargs);
+ memcpy(declared_arg_types, proargtypes, pronargs * sizeof(Oid));
+ rettype = enforce_generic_type_consistency(actual_arg_types,
+ declared_arg_types,
+ nargs,
+ funcform->prorettype,
+ false);
+ /* let's just check we got the same answer as the parser did ... */
+ if (rettype != result_type)
+ elog(ERROR, "function's resolved result type changed during planning");
+
+ /* perform any necessary typecasting of arguments */
+ make_fn_arguments(NULL, args, actual_arg_types, declared_arg_types);
+}
+
+/*
+ * evaluate_function: try to pre-evaluate a function call
+ *
+ * We can do this if the function is strict and has any constant-null inputs
+ * (just return a null constant), or if the function is immutable and has all
+ * constant inputs (call it and return the result as a Const node). In
+ * estimation mode we are willing to pre-evaluate stable functions too.
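+ *
+ * For example, "2 + 4" folds to the integer constant 6, and a call of a
+ * strict function with a constant NULL input, such as upper(NULL), folds
+ * to a NULL constant without being executed.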
+ *
+ * Returns a simplified expression if successful, or NULL if cannot
+ * simplify the function.
+ */
+static Expr *
+evaluate_function(Oid funcid, Oid result_type, int32 result_typmod,
+ Oid result_collid, Oid input_collid, List *args,
+ bool funcvariadic,
+ HeapTuple func_tuple,
+ eval_const_expressions_context *context)
+{
+ Form_pg_proc funcform = (Form_pg_proc) GETSTRUCT(func_tuple);
+ bool has_nonconst_input = false;
+ bool has_null_input = false;
+ ListCell *arg;
+ FuncExpr *newexpr;
+
+ /*
+ * Can't simplify if it returns a set.
+ */
+ if (funcform->proretset)
+ return NULL;
+
+ /*
+ * Can't simplify if it returns RECORD. The immediate problem is that it
+ * will be needing an expected tupdesc which we can't supply here.
+ *
+ * In the case where it has OUT parameters, it could get by without an
+ * expected tupdesc, but we still have issues: get_expr_result_type()
+ * doesn't know how to extract type info from a RECORD constant, and in
+ * the case of a NULL function result there doesn't seem to be any clean
+ * way to fix that. In view of the likelihood of there being still other
+ * gotchas, it seems best to leave the function call unreduced.
+ */
+ if (funcform->prorettype == RECORDOID)
+ return NULL;
+
+ /*
+ * Check for constant inputs and especially constant-NULL inputs.
+ */
+ foreach(arg, args)
+ {
+ if (IsA(lfirst(arg), Const))
+ has_null_input |= ((Const *) lfirst(arg))->constisnull;
+ else
+ has_nonconst_input = true;
+ }
+
+ /*
+ * If the function is strict and has a constant-NULL input, it will never
+ * be called at all, so we can replace the call by a NULL constant, even
+ * if there are other inputs that aren't constant, and even if the
+ * function is not otherwise immutable.
+ */
+ if (funcform->proisstrict && has_null_input)
+ return (Expr *) makeNullConst(result_type, result_typmod,
+ result_collid);
+
+ /*
+ * Otherwise, can simplify only if all inputs are constants. (For a
+ * non-strict function, constant NULL inputs are treated the same as
+ * constant non-NULL inputs.)
+ */
+ if (has_nonconst_input)
+ return NULL;
+
+ /*
+ * Ordinarily we are only allowed to simplify immutable functions. But for
+ * purposes of estimation, we consider it okay to simplify functions that
+ * are merely stable; the risk that the result might change from planning
+ * time to execution time is worth taking in preference to not being able
+ * to estimate the value at all.
+ */
+ if (funcform->provolatile == PROVOLATILE_IMMUTABLE)
+ /* okay */ ;
+ else if (context->estimate && funcform->provolatile == PROVOLATILE_STABLE)
+ /* okay */ ;
+ else
+ return NULL;
+
+ /*
+ * OK, looks like we can simplify this operator/function.
+ *
+ * Build a new FuncExpr node containing the already-simplified arguments.
+ */
+ newexpr = makeNode(FuncExpr);
+ newexpr->funcid = funcid;
+ newexpr->funcresulttype = result_type;
+ newexpr->funcretset = false;
+ newexpr->funcvariadic = funcvariadic;
+ newexpr->funcformat = COERCE_EXPLICIT_CALL; /* doesn't matter */
+ newexpr->funccollid = result_collid; /* doesn't matter */
+ newexpr->inputcollid = input_collid;
+ newexpr->args = args;
+ newexpr->location = -1;
+
+ return evaluate_expr((Expr *) newexpr, result_type, result_typmod,
+ result_collid);
+}
+
+/*
+ * inline_function: try to expand a function call inline
+ *
+ * If the function is a sufficiently simple SQL-language function
+ * (just "SELECT expression"), then we can inline it and avoid the rather
+ * high per-call overhead of SQL functions. Furthermore, this can expose
+ * opportunities for constant-folding within the function expression.
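+ *
+ * For example, a call of a SQL function whose body is just "SELECT $1 + 1"
+ * can be replaced by the expression "arg + 1", where arg is the actual
+ * argument of the call.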
+ *
+ * We have to beware of some special cases however. A directly or
+ * indirectly recursive function would cause us to recurse forever,
+ * so we keep track of which functions we are already expanding and
+ * do not re-expand them. Also, if a parameter is used more than once
+ * in the SQL-function body, we require it not to contain any volatile
+ * functions (volatiles might deliver inconsistent answers) nor to be
+ * unreasonably expensive to evaluate. The expensiveness check not only
+ * prevents us from doing multiple evaluations of an expensive parameter
+ * at runtime, but is a safety valve to limit growth of an expression due
+ * to repeated inlining.
+ *
+ * We must also beware of changing the volatility or strictness status of
+ * functions by inlining them.
+ *
+ * Also, at the moment we can't inline functions returning RECORD. This
+ * doesn't work in the general case because it discards information such
+ * as OUT-parameter declarations.
+ *
+ * Also, context-dependent expression nodes in the argument list are trouble.
+ *
+ * Returns a simplified expression if successful, or NULL if cannot
+ * simplify the function.
+ */
+static Expr *
+inline_function(Oid funcid, Oid result_type, Oid result_collid,
+ Oid input_collid, List *args,
+ bool funcvariadic,
+ HeapTuple func_tuple,
+ eval_const_expressions_context *context)
+{
+ Form_pg_proc funcform = (Form_pg_proc) GETSTRUCT(func_tuple);
+ char *src;
+ Datum tmp;
+ bool isNull;
+ MemoryContext oldcxt;
+ MemoryContext mycxt;
+ inline_error_callback_arg callback_arg;
+ ErrorContextCallback sqlerrcontext;
+ FuncExpr *fexpr;
+ SQLFunctionParseInfoPtr pinfo;
+ TupleDesc rettupdesc;
+ ParseState *pstate;
+ List *raw_parsetree_list;
+ List *querytree_list;
+ Query *querytree;
+ Node *newexpr;
+ int *usecounts;
+ ListCell *arg;
+ int i;
+
+ /*
+ * Forget it if the function is not SQL-language or has other showstopper
+ * properties. (The prokind and nargs checks are just paranoia.)
+ */
+ if (funcform->prolang != SQLlanguageId ||
+ funcform->prokind != PROKIND_FUNCTION ||
+ funcform->prosecdef ||
+ funcform->proretset ||
+ funcform->prorettype == RECORDOID ||
+ !heap_attisnull(func_tuple, Anum_pg_proc_proconfig, NULL) ||
+ funcform->pronargs != list_length(args))
+ return NULL;
+
+ /* Check for recursive function, and give up trying to expand if so */
+ if (list_member_oid(context->active_fns, funcid))
+ return NULL;
+
+ /* Check permission to call function (fail later, if not) */
+ if (pg_proc_aclcheck(funcid, GetUserId(), ACL_EXECUTE) != ACLCHECK_OK)
+ return NULL;
+
+ /* Check whether a plugin wants to hook function entry/exit */
+ if (FmgrHookIsNeeded(funcid))
+ return NULL;
+
+ /*
+ * Make a temporary memory context, so that we don't leak all the stuff
+ * that parsing might create.
+ */
+ mycxt = AllocSetContextCreate(CurrentMemoryContext,
+ "inline_function",
+ ALLOCSET_DEFAULT_SIZES);
+ oldcxt = MemoryContextSwitchTo(mycxt);
+
+ /*
+ * We need a dummy FuncExpr node containing the already-simplified
+ * arguments. (In some cases we don't really need it, but building it is
+ * cheap enough that it's not worth contortions to avoid.)
+ */
+ fexpr = makeNode(FuncExpr);
+ fexpr->funcid = funcid;
+ fexpr->funcresulttype = result_type;
+ fexpr->funcretset = false;
+ fexpr->funcvariadic = funcvariadic;
+ fexpr->funcformat = COERCE_EXPLICIT_CALL; /* doesn't matter */
+ fexpr->funccollid = result_collid; /* doesn't matter */
+ fexpr->inputcollid = input_collid;
+ fexpr->args = args;
+ fexpr->location = -1;
+
+ /* Fetch the function body */
+ tmp = SysCacheGetAttr(PROCOID,
+ func_tuple,
+ Anum_pg_proc_prosrc,
+ &isNull);
+ if (isNull)
+ elog(ERROR, "null prosrc for function %u", funcid);
+ src = TextDatumGetCString(tmp);
+
+ /*
+ * Setup error traceback support for ereport(). This is so that we can
+ * finger the function that bad information came from.
+ */
+ callback_arg.proname = NameStr(funcform->proname);
+ callback_arg.prosrc = src;
+
+ sqlerrcontext.callback = sql_inline_error_callback;
+ sqlerrcontext.arg = (void *) &callback_arg;
+ sqlerrcontext.previous = error_context_stack;
+ error_context_stack = &sqlerrcontext;
+
+ /* If we have prosqlbody, pay attention to that not prosrc */
+ tmp = SysCacheGetAttr(PROCOID,
+ func_tuple,
+ Anum_pg_proc_prosqlbody,
+ &isNull);
+ if (!isNull)
+ {
+ Node *n;
+ List *querytree_list;
+
+ n = stringToNode(TextDatumGetCString(tmp));
+ if (IsA(n, List))
+ querytree_list = linitial_node(List, castNode(List, n));
+ else
+ querytree_list = list_make1(n);
+ if (list_length(querytree_list) != 1)
+ goto fail;
+ querytree = linitial(querytree_list);
+
+ /*
+ * Because we'll insist below that the querytree have an empty rtable
+ * and no sublinks, it cannot have any relation references that need
+ * to be locked or rewritten. So we can omit those steps.
+ */
+ }
+ else
+ {
+ /* Set up to handle parameters while parsing the function body. */
+ pinfo = prepare_sql_fn_parse_info(func_tuple,
+ (Node *) fexpr,
+ input_collid);
+
+ /*
+ * We just do parsing and parse analysis, not rewriting, because
+ * rewriting will not affect table-free-SELECT-only queries, which is
+ * all that we care about. Also, we can punt as soon as we detect
+ * more than one command in the function body.
+ */
+ raw_parsetree_list = pg_parse_query(src);
+ if (list_length(raw_parsetree_list) != 1)
+ goto fail;
+
+ pstate = make_parsestate(NULL);
+ pstate->p_sourcetext = src;
+ sql_fn_parser_setup(pstate, pinfo);
+
+ querytree = transformTopLevelStmt(pstate, linitial(raw_parsetree_list));
+
+ free_parsestate(pstate);
+ }
+
+ /*
+ * The single command must be a simple "SELECT expression".
+ *
+ * Note: if you change the tests involved in this, see also plpgsql's
+ * exec_simple_check_plan(). That generally needs to have the same idea
+ * of what's a "simple expression", so that inlining a function that
+ * previously wasn't inlined won't change plpgsql's conclusion.
+ */
+ if (!IsA(querytree, Query) ||
+ querytree->commandType != CMD_SELECT ||
+ querytree->hasAggs ||
+ querytree->hasWindowFuncs ||
+ querytree->hasTargetSRFs ||
+ querytree->hasSubLinks ||
+ querytree->cteList ||
+ querytree->rtable ||
+ querytree->jointree->fromlist ||
+ querytree->jointree->quals ||
+ querytree->groupClause ||
+ querytree->groupingSets ||
+ querytree->havingQual ||
+ querytree->windowClause ||
+ querytree->distinctClause ||
+ querytree->sortClause ||
+ querytree->limitOffset ||
+ querytree->limitCount ||
+ querytree->setOperations ||
+ list_length(querytree->targetList) != 1)
+ goto fail;
+
+ /* If the function result is composite, resolve it */
+ (void) get_expr_result_type((Node *) fexpr,
+ NULL,
+ &rettupdesc);
+
+ /*
+ * Make sure the function (still) returns what it's declared to. This
+ * will raise an error if wrong, but that's okay since the function would
+ * fail at runtime anyway. Note that check_sql_fn_retval will also insert
+ * a coercion if needed to make the tlist expression match the declared
+ * type of the function.
+ *
+ * Note: we do not try this until we have verified that no rewriting was
+ * needed; that's probably not important, but let's be careful.
+ */
+ querytree_list = list_make1(querytree);
+ if (check_sql_fn_retval(list_make1(querytree_list),
+ result_type, rettupdesc,
+ false, NULL))
+ goto fail; /* reject whole-tuple-result cases */
+
+ /*
+ * Given the tests above, check_sql_fn_retval shouldn't have decided to
+ * inject a projection step, but let's just make sure.
+ */
+ if (querytree != linitial(querytree_list))
+ goto fail;
+
+ /* Now we can grab the tlist expression */
+ newexpr = (Node *) ((TargetEntry *) linitial(querytree->targetList))->expr;
+
+ /*
+ * If the SQL function returns VOID, we can only inline it if it is a
+ * SELECT of an expression returning VOID (ie, it's just a redirection to
+ * another VOID-returning function). In all non-VOID-returning cases,
+ * check_sql_fn_retval should ensure that newexpr returns the function's
+ * declared result type, so this test shouldn't fail otherwise; but we may
+ * as well cope gracefully if it does.
+ */
+ if (exprType(newexpr) != result_type)
+ goto fail;
+
+ /*
+ * Additional validity checks on the expression. It mustn't be more
+ * volatile than the surrounding function (this is to avoid breaking hacks
+ * that involve pretending a function is immutable when it really ain't).
+ * If the surrounding function is declared strict, then the expression
+ * must contain only strict constructs and must use all of the function
+ * parameters (this is overkill, but an exact analysis is hard).
+ */
+ if (funcform->provolatile == PROVOLATILE_IMMUTABLE &&
+ contain_mutable_functions(newexpr))
+ goto fail;
+ else if (funcform->provolatile == PROVOLATILE_STABLE &&
+ contain_volatile_functions(newexpr))
+ goto fail;
+
+ if (funcform->proisstrict &&
+ contain_nonstrict_functions(newexpr))
+ goto fail;
+
+ /*
+ * If any parameter expression contains a context-dependent node, we can't
+ * inline, for fear of putting such a node into the wrong context.
+ */
+ if (contain_context_dependent_node((Node *) args))
+ goto fail;
+
+ /*
+ * We may be able to do it; there are still checks on parameter usage to
+ * make, but those are most easily done in combination with the actual
+ * substitution of the inputs. So start building expression with inputs
+ * substituted.
+ */
+ usecounts = (int *) palloc0(funcform->pronargs * sizeof(int));
+ newexpr = substitute_actual_parameters(newexpr, funcform->pronargs,
+ args, usecounts);
+
+ /* Now check for parameter usage */
+ i = 0;
+ foreach(arg, args)
+ {
+ Node *param = lfirst(arg);
+
+ if (usecounts[i] == 0)
+ {
+ /* Param not used at all: uncool if func is strict */
+ if (funcform->proisstrict)
+ goto fail;
+ }
+ else if (usecounts[i] != 1)
+ {
+ /* Param used multiple times: uncool if expensive or volatile */
+ QualCost eval_cost;
+
+ /*
+ * We define "expensive" as "contains any subplan or more than 10
+ * operators". Note that the subplan search has to be done
+ * explicitly, since cost_qual_eval() will barf on unplanned
+ * subselects.
+ */
+ if (contain_subplans(param))
+ goto fail;
+ cost_qual_eval(&eval_cost, list_make1(param), NULL);
+ if (eval_cost.startup + eval_cost.per_tuple >
+ 10 * cpu_operator_cost)
+ goto fail;
+
+ /*
+ * Check volatility last since this is more expensive than the
+ * above tests
+ */
+ if (contain_volatile_functions(param))
+ goto fail;
+ }
+ i++;
+ }
+
+ /*
+ * Whew --- we can make the substitution. Copy the modified expression
+ * out of the temporary memory context, and clean up.
+ */
+ MemoryContextSwitchTo(oldcxt);
+
+ newexpr = copyObject(newexpr);
+
+ MemoryContextDelete(mycxt);
+
+ /*
+ * If the result is of a collatable type, force the result to expose the
+ * correct collation. In most cases this does not matter, but it's
+ * possible that the function result is used directly as a sort key or in
+ * other places where we expect exprCollation() to tell the truth.
+ */
+ if (OidIsValid(result_collid))
+ {
+ Oid exprcoll = exprCollation(newexpr);
+
+ if (OidIsValid(exprcoll) && exprcoll != result_collid)
+ {
+ CollateExpr *newnode = makeNode(CollateExpr);
+
+ newnode->arg = (Expr *) newexpr;
+ newnode->collOid = result_collid;
+ newnode->location = -1;
+
+ newexpr = (Node *) newnode;
+ }
+ }
+
+ /*
+ * Since there is now no trace of the function in the plan tree, we must
+ * explicitly record the plan's dependency on the function.
+ */
+ if (context->root)
+ record_plan_function_dependency(context->root, funcid);
+
+ /*
+ * Recursively try to simplify the modified expression. Here we must add
+ * the current function to the context list of active functions.
+ */
+ context->active_fns = lappend_oid(context->active_fns, funcid);
+ newexpr = eval_const_expressions_mutator(newexpr, context);
+ context->active_fns = list_delete_last(context->active_fns);
+
+ error_context_stack = sqlerrcontext.previous;
+
+ return (Expr *) newexpr;
+
+ /* Here if func is not inlinable: release temp memory and return NULL */
+fail:
+ MemoryContextSwitchTo(oldcxt);
+ MemoryContextDelete(mycxt);
+ error_context_stack = sqlerrcontext.previous;
+
+ return NULL;
+}
+
+/*
+ * Replace Param nodes by appropriate actual parameters
+ */
+static Node *
+substitute_actual_parameters(Node *expr, int nargs, List *args,
+ int *usecounts)
+{
+ substitute_actual_parameters_context context;
+
+ context.nargs = nargs;
+ context.args = args;
+ context.usecounts = usecounts;
+
+ return substitute_actual_parameters_mutator(expr, &context);
+}
+
+static Node *
+substitute_actual_parameters_mutator(Node *node,
+ substitute_actual_parameters_context *context)
+{
+ if (node == NULL)
+ return NULL;
+ if (IsA(node, Param))
+ {
+ Param *param = (Param *) node;
+
+ if (param->paramkind != PARAM_EXTERN)
+ elog(ERROR, "unexpected paramkind: %d", (int) param->paramkind);
+ if (param->paramid <= 0 || param->paramid > context->nargs)
+ elog(ERROR, "invalid paramid: %d", param->paramid);
+
+ /* Count usage of parameter */
+ context->usecounts[param->paramid - 1]++;
+
+ /* Select the appropriate actual arg and replace the Param with it */
+ /* We don't need to copy at this time (it'll get done later) */
+ return list_nth(context->args, param->paramid - 1);
+ }
+ return expression_tree_mutator(node, substitute_actual_parameters_mutator,
+ (void *) context);
+}
+
+/*
+ * error context callback to let us supply a call-stack traceback
+ */
+static void
+sql_inline_error_callback(void *arg)
+{
+ inline_error_callback_arg *callback_arg = (inline_error_callback_arg *) arg;
+ int syntaxerrposition;
+
+ /* If it's a syntax error, convert to internal syntax error report */
+ syntaxerrposition = geterrposition();
+ if (syntaxerrposition > 0)
+ {
+ errposition(0);
+ internalerrposition(syntaxerrposition);
+ internalerrquery(callback_arg->prosrc);
+ }
+
+ errcontext("SQL function \"%s\" during inlining", callback_arg->proname);
+}
+
+/*
+ * evaluate_expr: pre-evaluate a constant expression
+ *
+ * We use the executor's routine ExecEvalExpr() to avoid duplication of
+ * code and ensure we get the same result as the executor would get.
+ */
+Expr *
+evaluate_expr(Expr *expr, Oid result_type, int32 result_typmod,
+ Oid result_collation)
+{
+ EState *estate;
+ ExprState *exprstate;
+ MemoryContext oldcontext;
+ Datum const_val;
+ bool const_is_null;
+ int16 resultTypLen;
+ bool resultTypByVal;
+
+ /*
+ * To use the executor, we need an EState.
+ */
+ estate = CreateExecutorState();
+
+ /* We can use the estate's working context to avoid memory leaks. */
+ oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
+
+ /* Make sure any opfuncids are filled in. */
+ fix_opfuncids((Node *) expr);
+
+ /*
+ * Prepare expr for execution. (Note: we can't use ExecPrepareExpr
+ * because it'd result in recursively invoking eval_const_expressions.)
+ */
+ exprstate = ExecInitExpr(expr, NULL);
+
+ /*
+ * And evaluate it.
+ *
+ * It is OK to use a default econtext because none of the ExecEvalExpr()
+ * code used in this situation will use econtext. That might seem
+ * fortuitous, but it's not so unreasonable --- a constant expression does
+ * not depend on context, by definition, n'est ce pas?
+ */
+ const_val = ExecEvalExprSwitchContext(exprstate,
+ GetPerTupleExprContext(estate),
+ &const_is_null);
+
+ /* Get info needed about result datatype */
+ get_typlenbyval(result_type, &resultTypLen, &resultTypByVal);
+
+ /* Get back to outer memory context */
+ MemoryContextSwitchTo(oldcontext);
+
+ /*
+ * Must copy result out of sub-context used by expression eval.
+ *
+ * Also, if it's varlena, forcibly detoast it. This protects us against
+ * storing TOAST pointers into plans that might outlive the referenced
+ * data. (makeConst would handle detoasting anyway, but it's worth a few
+ * extra lines here so that we can do the copy and detoast in one step.)
+ */
+ if (!const_is_null)
+ {
+ if (resultTypLen == -1)
+ const_val = PointerGetDatum(PG_DETOAST_DATUM_COPY(const_val));
+ else
+ const_val = datumCopy(const_val, resultTypByVal, resultTypLen);
+ }
+
+ /* Release all the junk we just created */
+ FreeExecutorState(estate);
+
+ /*
+ * Make the constant result node.
+ */
+ return (Expr *) makeConst(result_type, result_typmod, result_collation,
+ resultTypLen,
+ const_val, const_is_null,
+ resultTypByVal);
+}
+
+
+/*
+ * inline_set_returning_function
+ * Attempt to "inline" a set-returning function in the FROM clause.
+ *
+ * "rte" is an RTE_FUNCTION rangetable entry. If it represents a call of a
+ * set-returning SQL function that can safely be inlined, expand the function
+ * and return the substitute Query structure. Otherwise, return NULL.
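+ *
+ * For example, given a function like get_foo(int) RETURNS SETOF foo whose
+ * body is "SELECT * FROM foo WHERE fooid = $1" and which is declared STABLE,
+ * "SELECT * FROM get_foo(1)" can be planned as though the function's SELECT
+ * had been written in its place (the names here are purely illustrative).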
+ *
+ * We assume that the RTE's expression has already been put through
+ * eval_const_expressions(), which among other things will take care of
+ * default arguments and named-argument notation.
+ *
+ * This has a good deal of similarity to inline_function(), but that's
+ * for the non-set-returning case, and there are enough differences to
+ * justify separate functions.
+ */
+Query *
+inline_set_returning_function(PlannerInfo *root, RangeTblEntry *rte)
+{
+ RangeTblFunction *rtfunc;
+ FuncExpr *fexpr;
+ Oid func_oid;
+ HeapTuple func_tuple;
+ Form_pg_proc funcform;
+ char *src;
+ Datum tmp;
+ bool isNull;
+ MemoryContext oldcxt;
+ MemoryContext mycxt;
+ inline_error_callback_arg callback_arg;
+ ErrorContextCallback sqlerrcontext;
+ SQLFunctionParseInfoPtr pinfo;
+ TypeFuncClass functypclass;
+ TupleDesc rettupdesc;
+ List *raw_parsetree_list;
+ List *querytree_list;
+ Query *querytree;
+
+ Assert(rte->rtekind == RTE_FUNCTION);
+
+ /*
+ * It doesn't make a lot of sense for a SQL SRF to refer to itself in its
+ * own FROM clause, since that must cause infinite recursion at runtime.
+ * It will cause this code to recurse too, so check for stack overflow.
+ * (There's no need to do more.)
+ */
+ check_stack_depth();
+
+ /* Fail if the RTE has ORDINALITY - we don't implement that here. */
+ if (rte->funcordinality)
+ return NULL;
+
+ /* Fail if RTE isn't a single, simple FuncExpr */
+ if (list_length(rte->functions) != 1)
+ return NULL;
+ rtfunc = (RangeTblFunction *) linitial(rte->functions);
+
+ if (!IsA(rtfunc->funcexpr, FuncExpr))
+ return NULL;
+ fexpr = (FuncExpr *) rtfunc->funcexpr;
+
+ func_oid = fexpr->funcid;
+
+ /*
+ * The function must be declared to return a set, else inlining would
+ * change the results if the contained SELECT didn't return exactly one
+ * row.
+ */
+ if (!fexpr->funcretset)
+ return NULL;
+
+ /*
+ * Refuse to inline if the arguments contain any volatile functions or
+ * sub-selects. Volatile functions are rejected because inlining may
+ * result in the arguments being evaluated multiple times, risking a
+ * change in behavior. Sub-selects are rejected partly for implementation
+ * reasons (pushing them down another level might change their behavior)
+ * and partly because they're likely to be expensive and so multiple
+ * evaluation would be bad.
+ */
+ if (contain_volatile_functions((Node *) fexpr->args) ||
+ contain_subplans((Node *) fexpr->args))
+ return NULL;
+
+ /* Check permission to call function (fail later, if not) */
+ if (pg_proc_aclcheck(func_oid, GetUserId(), ACL_EXECUTE) != ACLCHECK_OK)
+ return NULL;
+
+ /* Check whether a plugin wants to hook function entry/exit */
+ if (FmgrHookIsNeeded(func_oid))
+ return NULL;
+
+ /*
+ * OK, let's take a look at the function's pg_proc entry.
+ */
+ func_tuple = SearchSysCache1(PROCOID, ObjectIdGetDatum(func_oid));
+ if (!HeapTupleIsValid(func_tuple))
+ elog(ERROR, "cache lookup failed for function %u", func_oid);
+ funcform = (Form_pg_proc) GETSTRUCT(func_tuple);
+
+ /*
+ * Forget it if the function is not SQL-language or has other showstopper
+ * properties. In particular it mustn't be declared STRICT, since we
+ * couldn't enforce that. It also mustn't be VOLATILE, because that is
+ * supposed to cause it to be executed with its own snapshot, rather than
+ * sharing the snapshot of the calling query. We also disallow returning
+ * SETOF VOID, because inlining would result in exposing the actual result
+ * of the function's last SELECT, which should not happen in that case.
+ * (Rechecking prokind, proretset, and pronargs is just paranoia.)
+ */
+ if (funcform->prolang != SQLlanguageId ||
+ funcform->prokind != PROKIND_FUNCTION ||
+ funcform->proisstrict ||
+ funcform->provolatile == PROVOLATILE_VOLATILE ||
+ funcform->prorettype == VOIDOID ||
+ funcform->prosecdef ||
+ !funcform->proretset ||
+ list_length(fexpr->args) != funcform->pronargs ||
+ !heap_attisnull(func_tuple, Anum_pg_proc_proconfig, NULL))
+ {
+ ReleaseSysCache(func_tuple);
+ return NULL;
+ }
+
+ /*
+ * Make a temporary memory context, so that we don't leak all the stuff
+ * that parsing might create.
+ */
+ mycxt = AllocSetContextCreate(CurrentMemoryContext,
+ "inline_set_returning_function",
+ ALLOCSET_DEFAULT_SIZES);
+ oldcxt = MemoryContextSwitchTo(mycxt);
+
+ /* Fetch the function body */
+ tmp = SysCacheGetAttr(PROCOID,
+ func_tuple,
+ Anum_pg_proc_prosrc,
+ &isNull);
+ if (isNull)
+ elog(ERROR, "null prosrc for function %u", func_oid);
+ src = TextDatumGetCString(tmp);
+
+ /*
+ * Setup error traceback support for ereport(). This is so that we can
+ * finger the function that bad information came from.
+ */
+ callback_arg.proname = NameStr(funcform->proname);
+ callback_arg.prosrc = src;
+
+ sqlerrcontext.callback = sql_inline_error_callback;
+ sqlerrcontext.arg = (void *) &callback_arg;
+ sqlerrcontext.previous = error_context_stack;
+ error_context_stack = &sqlerrcontext;
+
+ /* If we have prosqlbody, pay attention to that not prosrc */
+ tmp = SysCacheGetAttr(PROCOID,
+ func_tuple,
+ Anum_pg_proc_prosqlbody,
+ &isNull);
+ if (!isNull)
+ {
+ Node *n;
+
+ n = stringToNode(TextDatumGetCString(tmp));
+ if (IsA(n, List))
+ querytree_list = linitial_node(List, castNode(List, n));
+ else
+ querytree_list = list_make1(n);
+ if (list_length(querytree_list) != 1)
+ goto fail;
+ querytree = linitial(querytree_list);
+
+ /* Acquire necessary locks, then apply rewriter. */
+ AcquireRewriteLocks(querytree, true, false);
+ querytree_list = pg_rewrite_query(querytree);
+ if (list_length(querytree_list) != 1)
+ goto fail;
+ querytree = linitial(querytree_list);
+ }
+ else
+ {
+ /*
+ * Set up to handle parameters while parsing the function body. We
+ * can use the FuncExpr just created as the input for
+ * prepare_sql_fn_parse_info.
+ */
+ pinfo = prepare_sql_fn_parse_info(func_tuple,
+ (Node *) fexpr,
+ fexpr->inputcollid);
+
+ /*
+ * Parse, analyze, and rewrite (unlike inline_function(), we can't
+ * skip rewriting here). We can fail as soon as we find more than one
+ * query, though.
+ */
+ raw_parsetree_list = pg_parse_query(src);
+ if (list_length(raw_parsetree_list) != 1)
+ goto fail;
+
+ querytree_list = pg_analyze_and_rewrite_withcb(linitial(raw_parsetree_list),
+ src,
+ (ParserSetupHook) sql_fn_parser_setup,
+ pinfo, NULL);
+ if (list_length(querytree_list) != 1)
+ goto fail;
+ querytree = linitial(querytree_list);
+ }
+
+ /*
+ * Also resolve the actual function result tupdesc, if composite. If the
+ * function is just declared to return RECORD, dig the info out of the AS
+ * clause.
+ */
+ functypclass = get_expr_result_type((Node *) fexpr, NULL, &rettupdesc);
+ if (functypclass == TYPEFUNC_RECORD)
+ rettupdesc = BuildDescFromLists(rtfunc->funccolnames,
+ rtfunc->funccoltypes,
+ rtfunc->funccoltypmods,
+ rtfunc->funccolcollations);
+
+ /*
+ * The single command must be a plain SELECT.
+ */
+ if (!IsA(querytree, Query) ||
+ querytree->commandType != CMD_SELECT)
+ goto fail;
+
+ /*
+ * Make sure the function (still) returns what it's declared to. This
+ * will raise an error if wrong, but that's okay since the function would
+ * fail at runtime anyway. Note that check_sql_fn_retval will also insert
+ * coercions if needed to make the tlist expression(s) match the declared
+ * type of the function. We also ask it to insert dummy NULL columns for
+ * any dropped columns in rettupdesc, so that the elements of the modified
+ * tlist match up to the attribute numbers.
+ *
+ * If the function returns a composite type, don't inline unless the check
+ * shows it's returning a whole tuple result; otherwise what it's
+ * returning is a single composite column which is not what we need.
+ */
+ if (!check_sql_fn_retval(list_make1(querytree_list),
+ fexpr->funcresulttype, rettupdesc,
+ true, NULL) &&
+ (functypclass == TYPEFUNC_COMPOSITE ||
+ functypclass == TYPEFUNC_COMPOSITE_DOMAIN ||
+ functypclass == TYPEFUNC_RECORD))
+ goto fail; /* reject not-whole-tuple-result cases */
+
+ /*
+ * check_sql_fn_retval might've inserted a projection step, but that's
+ * fine; just make sure we use the upper Query.
+ */
+ querytree = linitial_node(Query, querytree_list);
+
+ /*
+ * Looks good --- substitute parameters into the query.
+ */
+ querytree = substitute_actual_srf_parameters(querytree,
+ funcform->pronargs,
+ fexpr->args);
+
+ /*
+ * Copy the modified query out of the temporary memory context, and clean
+ * up.
+ */
+ MemoryContextSwitchTo(oldcxt);
+
+ querytree = copyObject(querytree);
+
+ MemoryContextDelete(mycxt);
+ error_context_stack = sqlerrcontext.previous;
+ ReleaseSysCache(func_tuple);
+
+ /*
+ * We don't have to fix collations here because the upper query is already
+ * parsed, ie, the collations in the RTE are what count.
+ */
+
+ /*
+ * Since there is now no trace of the function in the plan tree, we must
+ * explicitly record the plan's dependency on the function.
+ */
+ record_plan_function_dependency(root, func_oid);
+
+ /*
+ * We must also notice if the inserted query adds a dependency on the
+ * calling role due to RLS quals.
+ */
+ if (querytree->hasRowSecurity)
+ root->glob->dependsOnRole = true;
+
+ return querytree;
+
+ /* Here if func is not inlinable: release temp memory and return NULL */
+fail:
+ MemoryContextSwitchTo(oldcxt);
+ MemoryContextDelete(mycxt);
+ error_context_stack = sqlerrcontext.previous;
+ ReleaseSysCache(func_tuple);
+
+ return NULL;
+}
+
+/*
+ * Replace Param nodes by appropriate actual parameters
+ *
+ * This is just enough different from substitute_actual_parameters()
+ * that it needs its own code.
+ */
+static Query *
+substitute_actual_srf_parameters(Query *expr, int nargs, List *args)
+{
+ substitute_actual_srf_parameters_context context;
+
+ context.nargs = nargs;
+ context.args = args;
+ context.sublevels_up = 1;
+
+ return query_tree_mutator(expr,
+ substitute_actual_srf_parameters_mutator,
+ &context,
+ 0);
+}
+
+static Node *
+substitute_actual_srf_parameters_mutator(Node *node,
+ substitute_actual_srf_parameters_context *context)
+{
+ Node *result;
+
+ if (node == NULL)
+ return NULL;
+ if (IsA(node, Query))
+ {
+ context->sublevels_up++;
+ result = (Node *) query_tree_mutator((Query *) node,
+ substitute_actual_srf_parameters_mutator,
+ (void *) context,
+ 0);
+ context->sublevels_up--;
+ return result;
+ }
+ if (IsA(node, Param))
+ {
+ Param *param = (Param *) node;
+
+ if (param->paramkind == PARAM_EXTERN)
+ {
+ if (param->paramid <= 0 || param->paramid > context->nargs)
+ elog(ERROR, "invalid paramid: %d", param->paramid);
+
+ /*
+ * Since the parameter is being inserted into a subquery, we must
+ * adjust levels.
+ */
+ result = copyObject(list_nth(context->args, param->paramid - 1));
+ IncrementVarSublevelsUp(result, context->sublevels_up, 0);
+ return result;
+ }
+ }
+ return expression_tree_mutator(node,
+ substitute_actual_srf_parameters_mutator,
+ (void *) context);
+}
+
+/*
+ * pull_paramids
+ * Returns a Bitmapset containing the paramids of all Params in 'expr'.
+ */
+Bitmapset *
+pull_paramids(Expr *expr)
+{
+ Bitmapset *result = NULL;
+
+ (void) pull_paramids_walker((Node *) expr, &result);
+
+ return result;
+}
+
+static bool
+pull_paramids_walker(Node *node, Bitmapset **context)
+{
+ if (node == NULL)
+ return false;
+ if (IsA(node, Param))
+ {
+ Param *param = (Param *) node;
+
+ *context = bms_add_member(*context, param->paramid);
+ return false;
+ }
+ return expression_tree_walker(node, pull_paramids_walker,
+ (void *) context);
+}
diff --git a/src/backend/optimizer/util/inherit.c b/src/backend/optimizer/util/inherit.c
new file mode 100644
index 0000000..3c11f5d
--- /dev/null
+++ b/src/backend/optimizer/util/inherit.c
@@ -0,0 +1,949 @@
+/*-------------------------------------------------------------------------
+ *
+ * inherit.c
+ * Routines to process child relations in inheritance trees
+ *
+ * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ * src/backend/optimizer/util/inherit.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "access/sysattr.h"
+#include "access/table.h"
+#include "catalog/partition.h"
+#include "catalog/pg_inherits.h"
+#include "catalog/pg_type.h"
+#include "miscadmin.h"
+#include "nodes/makefuncs.h"
+#include "optimizer/appendinfo.h"
+#include "optimizer/inherit.h"
+#include "optimizer/optimizer.h"
+#include "optimizer/pathnode.h"
+#include "optimizer/plancat.h"
+#include "optimizer/planmain.h"
+#include "optimizer/planner.h"
+#include "optimizer/prep.h"
+#include "optimizer/restrictinfo.h"
+#include "parser/parsetree.h"
+#include "partitioning/partdesc.h"
+#include "partitioning/partprune.h"
+#include "utils/rel.h"
+
+
+static void expand_partitioned_rtentry(PlannerInfo *root, RelOptInfo *relinfo,
+ RangeTblEntry *parentrte,
+ Index parentRTindex, Relation parentrel,
+ PlanRowMark *top_parentrc, LOCKMODE lockmode);
+static void expand_single_inheritance_child(PlannerInfo *root,
+ RangeTblEntry *parentrte,
+ Index parentRTindex, Relation parentrel,
+ PlanRowMark *top_parentrc, Relation childrel,
+ RangeTblEntry **childrte_p,
+ Index *childRTindex_p);
+static Bitmapset *translate_col_privs(const Bitmapset *parent_privs,
+ List *translated_vars);
+static Bitmapset *translate_col_privs_multilevel(PlannerInfo *root,
+ RelOptInfo *rel,
+ RelOptInfo *parent_rel,
+ Bitmapset *parent_cols);
+static void expand_appendrel_subquery(PlannerInfo *root, RelOptInfo *rel,
+ RangeTblEntry *rte, Index rti);
+
+
+/*
+ * expand_inherited_rtentry
+ * Expand a rangetable entry that has the "inh" bit set.
+ *
+ * "inh" is only allowed in two cases: RELATION and SUBQUERY RTEs.
+ *
+ * "inh" on a plain RELATION RTE means that it is a partitioned table or the
+ * parent of a traditional-inheritance set. In this case we must add entries
+ * for all the interesting child tables to the query's rangetable, and build
+ * additional planner data structures for them, including RelOptInfos,
+ * AppendRelInfos, and possibly PlanRowMarks.
+ *
+ * Note that the original RTE is considered to represent the whole inheritance
+ * set. In the case of traditional inheritance, the first of the generated
+ * RTEs is an RTE for the same table, but with inh = false, to represent the
+ * parent table in its role as a simple member of the inheritance set. For
+ * partitioning, we don't need a second RTE because the partitioned table
+ * itself has no data and need not be scanned.
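+ *
+ * For example, expanding a traditional-inheritance parent with two child
+ * tables adds three child RTEs (one for the parent itself, with inh = false,
+ * plus one per child), each with an accompanying AppendRelInfo and
+ * RelOptInfo.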
+ *
+ * "inh" on a SUBQUERY RTE means that it's the parent of a UNION ALL group,
+ * which is treated as an appendrel similarly to inheritance cases; however,
+ * we already made RTEs and AppendRelInfos for the subqueries. We only need
+ * to build RelOptInfos for them, which is done by expand_appendrel_subquery.
+ */
+void
+expand_inherited_rtentry(PlannerInfo *root, RelOptInfo *rel,
+ RangeTblEntry *rte, Index rti)
+{
+ Oid parentOID;
+ Relation oldrelation;
+ LOCKMODE lockmode;
+ PlanRowMark *oldrc;
+ bool old_isParent = false;
+ int old_allMarkTypes = 0;
+
+ Assert(rte->inh); /* else caller error */
+
+ if (rte->rtekind == RTE_SUBQUERY)
+ {
+ expand_appendrel_subquery(root, rel, rte, rti);
+ return;
+ }
+
+ Assert(rte->rtekind == RTE_RELATION);
+
+ parentOID = rte->relid;
+
+ /*
+ * We used to check has_subclass() here, but there's no longer any need
+ * to, because subquery_planner already did.
+ */
+
+ /*
+ * The rewriter should already have obtained an appropriate lock on each
+ * relation named in the query, so we can open the parent relation without
+ * locking it. However, for each child relation we add to the query, we
+ * must obtain an appropriate lock, because this will be the first use of
+ * those relations in the parse/rewrite/plan pipeline. Child rels should
+ * use the same lockmode as their parent.
+ */
+ oldrelation = table_open(parentOID, NoLock);
+ lockmode = rte->rellockmode;
+
+ /*
+ * If parent relation is selected FOR UPDATE/SHARE, we need to mark its
+ * PlanRowMark as isParent = true, and generate a new PlanRowMark for each
+ * child.
+ */
+ oldrc = get_plan_rowmark(root->rowMarks, rti);
+ if (oldrc)
+ {
+ old_isParent = oldrc->isParent;
+ oldrc->isParent = true;
+ /* Save initial value of allMarkTypes before children add to it */
+ old_allMarkTypes = oldrc->allMarkTypes;
+ }
+
+ /* Scan the inheritance set and expand it */
+ if (oldrelation->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
+ {
+ /*
+ * Partitioned table, so set up for partitioning.
+ */
+ Assert(rte->relkind == RELKIND_PARTITIONED_TABLE);
+
+ /*
+ * Recursively expand and lock the partitions. While at it, also
+ * extract the partition key columns of all the partitioned tables.
+ */
+ expand_partitioned_rtentry(root, rel, rte, rti,
+ oldrelation, oldrc, lockmode);
+ }
+ else
+ {
+ /*
+ * Ordinary table, so process traditional-inheritance children. (Note
+ * that partitioned tables are not allowed to have inheritance
+ * children, so it's not possible for both cases to apply.)
+ */
+ List *inhOIDs;
+ ListCell *l;
+
+ /* Scan for all members of inheritance set, acquire needed locks */
+ inhOIDs = find_all_inheritors(parentOID, lockmode, NULL);
+
+ /*
+ * We used to special-case the situation where the table no longer has
+ * any children, by clearing rte->inh and exiting. That no longer
+ * works, because this function doesn't get run until after decisions
+ * have been made that depend on rte->inh. We have to treat such
+ * situations as normal inheritance. The table itself should always
+ * have been found, though.
+ */
+ Assert(inhOIDs != NIL);
+ Assert(linitial_oid(inhOIDs) == parentOID);
+
+ /* Expand simple_rel_array and friends to hold child objects. */
+ expand_planner_arrays(root, list_length(inhOIDs));
+
+ /*
+ * Expand inheritance children in the order the OIDs were returned by
+ * find_all_inheritors.
+ */
+ foreach(l, inhOIDs)
+ {
+ Oid childOID = lfirst_oid(l);
+ Relation newrelation;
+ RangeTblEntry *childrte;
+ Index childRTindex;
+
+ /* Open rel if needed; we already have required locks */
+ if (childOID != parentOID)
+ newrelation = table_open(childOID, NoLock);
+ else
+ newrelation = oldrelation;
+
+ /*
+ * It is possible that the parent table has children that are temp
+ * tables of other backends. We cannot safely access such tables
+ * (because of buffering issues), and the best thing to do seems
+ * to be to silently ignore them.
+ */
+ if (childOID != parentOID && RELATION_IS_OTHER_TEMP(newrelation))
+ {
+ table_close(newrelation, lockmode);
+ continue;
+ }
+
+ /* Create RTE and AppendRelInfo, plus PlanRowMark if needed. */
+ expand_single_inheritance_child(root, rte, rti, oldrelation,
+ oldrc, newrelation,
+ &childrte, &childRTindex);
+
+ /* Create the otherrel RelOptInfo too. */
+ (void) build_simple_rel(root, childRTindex, rel);
+
+ /* Close child relations, but keep locks */
+ if (childOID != parentOID)
+ table_close(newrelation, NoLock);
+ }
+ }
+
+ /*
+ * Some children might require different mark types, which would've been
+ * reported into oldrc. If so, add relevant entries to the top-level
+ * targetlist and update parent rel's reltarget. This should match what
+ * preprocess_targetlist() would have added if the mark types had been
+ * requested originally.
+ *
+ * (Someday it might be useful to fold these resjunk columns into the
+ * row-identity-column management used for UPDATE/DELETE. Today is not
+ * that day, however.)
+ */
+ if (oldrc)
+ {
+ int new_allMarkTypes = oldrc->allMarkTypes;
+ Var *var;
+ TargetEntry *tle;
+ char resname[32];
+ List *newvars = NIL;
+
+ /* Add TID junk Var if needed, unless we had it already */
+ if (new_allMarkTypes & ~(1 << ROW_MARK_COPY) &&
+ !(old_allMarkTypes & ~(1 << ROW_MARK_COPY)))
+ {
+ /* Need to fetch TID */
+ var = makeVar(oldrc->rti,
+ SelfItemPointerAttributeNumber,
+ TIDOID,
+ -1,
+ InvalidOid,
+ 0);
+ snprintf(resname, sizeof(resname), "ctid%u", oldrc->rowmarkId);
+ tle = makeTargetEntry((Expr *) var,
+ list_length(root->processed_tlist) + 1,
+ pstrdup(resname),
+ true);
+ root->processed_tlist = lappend(root->processed_tlist, tle);
+ newvars = lappend(newvars, var);
+ }
+
+ /* Add whole-row junk Var if needed, unless we had it already */
+ if ((new_allMarkTypes & (1 << ROW_MARK_COPY)) &&
+ !(old_allMarkTypes & (1 << ROW_MARK_COPY)))
+ {
+ var = makeWholeRowVar(planner_rt_fetch(oldrc->rti, root),
+ oldrc->rti,
+ 0,
+ false);
+ snprintf(resname, sizeof(resname), "wholerow%u", oldrc->rowmarkId);
+ tle = makeTargetEntry((Expr *) var,
+ list_length(root->processed_tlist) + 1,
+ pstrdup(resname),
+ true);
+ root->processed_tlist = lappend(root->processed_tlist, tle);
+ newvars = lappend(newvars, var);
+ }
+
+ /* Add tableoid junk Var, unless we had it already */
+ if (!old_isParent)
+ {
+ var = makeVar(oldrc->rti,
+ TableOidAttributeNumber,
+ OIDOID,
+ -1,
+ InvalidOid,
+ 0);
+ snprintf(resname, sizeof(resname), "tableoid%u", oldrc->rowmarkId);
+ tle = makeTargetEntry((Expr *) var,
+ list_length(root->processed_tlist) + 1,
+ pstrdup(resname),
+ true);
+ root->processed_tlist = lappend(root->processed_tlist, tle);
+ newvars = lappend(newvars, var);
+ }
+
+ /*
+	 * Add the new Vars to the parent's reltarget.  We needn't worry about
+	 * the children's reltargets; they'll be made later.
+ */
+ add_vars_to_targetlist(root, newvars, bms_make_singleton(0), false);
+ }
+
+ table_close(oldrelation, NoLock);
+}
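+
+/*
+ * A hypothetical example of the rowmark fix-up above: for
+ *		SELECT * FROM parent FOR UPDATE;
+ * preprocess_targetlist() ran before inheritance expansion and added only
+ * the junk columns implied by the parent's own rowmark (typically a
+ * "ctid%u" column).  Once children are added, the parent rowmark becomes
+ * isParent, so a "tableoid%u" column is appended here to identify which
+ * child a fetched TID belongs to; and if some child can only be handled
+ * with ROW_MARK_COPY (for instance, a foreign-table child), a
+ * "wholerow%u" column is appended as well.
+ */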
+
+/*
+ * expand_partitioned_rtentry
+ * Recursively expand an RTE for a partitioned table.
+ */
+static void
+expand_partitioned_rtentry(PlannerInfo *root, RelOptInfo *relinfo,
+ RangeTblEntry *parentrte,
+ Index parentRTindex, Relation parentrel,
+ PlanRowMark *top_parentrc, LOCKMODE lockmode)
+{
+ PartitionDesc partdesc;
+ Bitmapset *live_parts;
+ int num_live_parts;
+ int i;
+
+ check_stack_depth();
+
+ Assert(parentrte->inh);
+
+ partdesc = PartitionDirectoryLookup(root->glob->partition_directory,
+ parentrel);
+
+ /* A partitioned table should always have a partition descriptor. */
+ Assert(partdesc);
+
+ /*
+ * Note down whether any partition key cols are being updated. Though it's
+ * the root partitioned table's updatedCols we are interested in, we
+ * instead use parentrte to get the updatedCols. This is convenient
+ * because parentrte already has the root partrel's updatedCols translated
+ * to match the attribute ordering of parentrel.
+ */
+ if (!root->partColsUpdated)
+ root->partColsUpdated =
+ has_partition_attrs(parentrel, parentrte->updatedCols, NULL);
+
+ /* Nothing further to do here if there are no partitions. */
+ if (partdesc->nparts == 0)
+ return;
+
+ /*
+ * Perform partition pruning using restriction clauses assigned to parent
+ * relation. live_parts will contain PartitionDesc indexes of partitions
+ * that survive pruning. Below, we will initialize child objects for the
+ * surviving partitions.
+ */
+ relinfo->live_parts = live_parts = prune_append_rel_partitions(relinfo);
+
+ /* Expand simple_rel_array and friends to hold child objects. */
+ num_live_parts = bms_num_members(live_parts);
+ if (num_live_parts > 0)
+ expand_planner_arrays(root, num_live_parts);
+
+ /*
+ * We also store partition RelOptInfo pointers in the parent relation.
+ * Since we're palloc0'ing, slots corresponding to pruned partitions will
+ * contain NULL.
+ */
+ Assert(relinfo->part_rels == NULL);
+ relinfo->part_rels = (RelOptInfo **)
+ palloc0(relinfo->nparts * sizeof(RelOptInfo *));
+
+ /*
+ * Create a child RTE for each live partition. Note that unlike
+ * traditional inheritance, we don't need a child RTE for the partitioned
+ * table itself, because it's not going to be scanned.
+ */
+ i = -1;
+ while ((i = bms_next_member(live_parts, i)) >= 0)
+ {
+ Oid childOID = partdesc->oids[i];
+ Relation childrel;
+ RangeTblEntry *childrte;
+ Index childRTindex;
+ RelOptInfo *childrelinfo;
+
+ /* Open rel, acquiring required locks */
+ childrel = table_open(childOID, lockmode);
+
+ /*
+ * Temporary partitions belonging to other sessions should have been
+ * disallowed at definition, but for paranoia's sake, let's double
+ * check.
+ */
+ if (RELATION_IS_OTHER_TEMP(childrel))
+ elog(ERROR, "temporary relation from another session found as partition");
+
+ /* Create RTE and AppendRelInfo, plus PlanRowMark if needed. */
+ expand_single_inheritance_child(root, parentrte, parentRTindex,
+ parentrel, top_parentrc, childrel,
+ &childrte, &childRTindex);
+
+ /* Create the otherrel RelOptInfo too. */
+ childrelinfo = build_simple_rel(root, childRTindex, relinfo);
+ relinfo->part_rels[i] = childrelinfo;
+ relinfo->all_partrels = bms_add_members(relinfo->all_partrels,
+ childrelinfo->relids);
+
+ /* If this child is itself partitioned, recurse */
+ if (childrel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
+ expand_partitioned_rtentry(root, childrelinfo,
+ childrte, childRTindex,
+ childrel, top_parentrc, lockmode);
+
+ /* Close child relation, but keep locks */
+ table_close(childrel, NoLock);
+ }
+}
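+
+/*
+ * A hypothetical example of the expansion above: given
+ *		CREATE TABLE meas (ts timestamptz, v int) PARTITION BY RANGE (ts);
+ * with partitions meas_2021 and meas_2022, a query such as
+ *		SELECT * FROM meas WHERE ts >= '2022-01-01';
+ * lets prune_append_rel_partitions() discard meas_2021, so only meas_2022
+ * gets a child RTE, AppendRelInfo, and RelOptInfo here; the pruned
+ * partition's slot in part_rels simply stays NULL.
+ */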
+
+/*
+ * expand_single_inheritance_child
+ * Build a RangeTblEntry and an AppendRelInfo, plus maybe a PlanRowMark.
+ *
+ * We now expand the partition hierarchy level by level, creating a
+ * corresponding hierarchy of AppendRelInfos and RelOptInfos, where each
+ * partitioned descendant acts as a parent of its immediate partitions.
+ * (This is a difference from what older versions of PostgreSQL did and what
+ * is still done in the case of table inheritance for unpartitioned tables,
+ * where the hierarchy is flattened during RTE expansion.)
+ *
+ * PlanRowMarks still carry the top-parent's RTI, and the top-parent's
+ * allMarkTypes field still accumulates values from all descendants.
+ *
+ * "parentrte" and "parentRTindex" are immediate parent's RTE and
+ * RTI. "top_parentrc" is top parent's PlanRowMark.
+ *
+ * The child RangeTblEntry and its RTI are returned in "childrte_p" and
+ * "childRTindex_p" resp.
+ */
+static void
+expand_single_inheritance_child(PlannerInfo *root, RangeTblEntry *parentrte,
+ Index parentRTindex, Relation parentrel,
+ PlanRowMark *top_parentrc, Relation childrel,
+ RangeTblEntry **childrte_p,
+ Index *childRTindex_p)
+{
+ Query *parse = root->parse;
+ Oid parentOID = RelationGetRelid(parentrel);
+ Oid childOID = RelationGetRelid(childrel);
+ RangeTblEntry *childrte;
+ Index childRTindex;
+ AppendRelInfo *appinfo;
+ TupleDesc child_tupdesc;
+ List *parent_colnames;
+ List *child_colnames;
+
+ /*
+ * Build an RTE for the child, and attach to query's rangetable list. We
+ * copy most scalar fields of the parent's RTE, but replace relation OID,
+ * relkind, and inh for the child. Also, set requiredPerms to zero since
+ * all required permissions checks are done on the original RTE. Likewise,
+ * set the child's securityQuals to empty, because we only want to apply
+ * the parent's RLS conditions regardless of what RLS properties
+ * individual children may have. (This is an intentional choice to make
+ * inherited RLS work like regular permissions checks.) The parent
+ * securityQuals will be propagated to children along with other base
+ * restriction clauses, so we don't need to do it here. Other
+ * infrastructure of the parent RTE has to be translated to match the
+ * child table's column ordering, which we do below, so a "flat" copy is
+ * sufficient to start with.
+ */
+ childrte = makeNode(RangeTblEntry);
+ memcpy(childrte, parentrte, sizeof(RangeTblEntry));
+ Assert(parentrte->rtekind == RTE_RELATION); /* else this is dubious */
+ childrte->relid = childOID;
+ childrte->relkind = childrel->rd_rel->relkind;
+ /* A partitioned child will need to be expanded further. */
+ if (childrte->relkind == RELKIND_PARTITIONED_TABLE)
+ {
+ Assert(childOID != parentOID);
+ childrte->inh = true;
+ }
+ else
+ childrte->inh = false;
+ childrte->requiredPerms = 0;
+ childrte->securityQuals = NIL;
+
+ /* Link not-yet-fully-filled child RTE into data structures */
+ parse->rtable = lappend(parse->rtable, childrte);
+ childRTindex = list_length(parse->rtable);
+ *childrte_p = childrte;
+ *childRTindex_p = childRTindex;
+
+ /*
+ * Build an AppendRelInfo struct for each parent/child pair.
+ */
+ appinfo = make_append_rel_info(parentrel, childrel,
+ parentRTindex, childRTindex);
+ root->append_rel_list = lappend(root->append_rel_list, appinfo);
+
+ /* tablesample is probably null, but copy it */
+ childrte->tablesample = copyObject(parentrte->tablesample);
+
+ /*
+ * Construct an alias clause for the child, which we can also use as eref.
+ * This is important so that EXPLAIN will print the right column aliases
+ * for child-table columns. (Since ruleutils.c doesn't have any easy way
+ * to reassociate parent and child columns, we must get the child column
+ * aliases right to start with. Note that setting childrte->alias forces
+ * ruleutils.c to use these column names, which it otherwise would not.)
+ */
+ child_tupdesc = RelationGetDescr(childrel);
+ parent_colnames = parentrte->eref->colnames;
+ child_colnames = NIL;
+ for (int cattno = 0; cattno < child_tupdesc->natts; cattno++)
+ {
+ Form_pg_attribute att = TupleDescAttr(child_tupdesc, cattno);
+ const char *attname;
+
+ if (att->attisdropped)
+ {
+ /* Always insert an empty string for a dropped column */
+ attname = "";
+ }
+ else if (appinfo->parent_colnos[cattno] > 0 &&
+ appinfo->parent_colnos[cattno] <= list_length(parent_colnames))
+ {
+ /* Duplicate the query-assigned name for the parent column */
+ attname = strVal(list_nth(parent_colnames,
+ appinfo->parent_colnos[cattno] - 1));
+ }
+ else
+ {
+ /* New column, just use its real name */
+ attname = NameStr(att->attname);
+ }
+ child_colnames = lappend(child_colnames, makeString(pstrdup(attname)));
+ }
+
+ /*
+ * We just duplicate the parent's table alias name for each child. If the
+ * plan gets printed, ruleutils.c has to sort out unique table aliases to
+ * use, which it can handle.
+ */
+ childrte->alias = childrte->eref = makeAlias(parentrte->eref->aliasname,
+ child_colnames);
+
+ /*
+ * Translate the column permissions bitmaps to the child's attnums (we
+ * have to build the translated_vars list before we can do this). But if
+ * this is the parent table, we can just duplicate the parent's bitmaps.
+ *
+ * Note: we need to do this even though the executor won't run any
+ * permissions checks on the child RTE. The insertedCols/updatedCols
+ * bitmaps may be examined for trigger-firing purposes.
+ */
+ if (childOID != parentOID)
+ {
+ childrte->selectedCols = translate_col_privs(parentrte->selectedCols,
+ appinfo->translated_vars);
+ childrte->insertedCols = translate_col_privs(parentrte->insertedCols,
+ appinfo->translated_vars);
+ childrte->updatedCols = translate_col_privs(parentrte->updatedCols,
+ appinfo->translated_vars);
+ }
+ else
+ {
+ childrte->selectedCols = bms_copy(parentrte->selectedCols);
+ childrte->insertedCols = bms_copy(parentrte->insertedCols);
+ childrte->updatedCols = bms_copy(parentrte->updatedCols);
+ }
+
+ /*
+ * Store the RTE and appinfo in the respective PlannerInfo arrays, which
+ * the caller must already have allocated space for.
+ */
+ Assert(childRTindex < root->simple_rel_array_size);
+ Assert(root->simple_rte_array[childRTindex] == NULL);
+ root->simple_rte_array[childRTindex] = childrte;
+ Assert(root->append_rel_array[childRTindex] == NULL);
+ root->append_rel_array[childRTindex] = appinfo;
+
+ /*
+ * Build a PlanRowMark if parent is marked FOR UPDATE/SHARE.
+ */
+ if (top_parentrc)
+ {
+ PlanRowMark *childrc = makeNode(PlanRowMark);
+
+ childrc->rti = childRTindex;
+ childrc->prti = top_parentrc->rti;
+ childrc->rowmarkId = top_parentrc->rowmarkId;
+ /* Reselect rowmark type, because relkind might not match parent */
+ childrc->markType = select_rowmark_type(childrte,
+ top_parentrc->strength);
+ childrc->allMarkTypes = (1 << childrc->markType);
+ childrc->strength = top_parentrc->strength;
+ childrc->waitPolicy = top_parentrc->waitPolicy;
+
+ /*
+ * We mark RowMarks for partitioned child tables as parent RowMarks so
+ * that the executor ignores them (except their existence means that
+ * the child tables will be locked using the appropriate mode).
+ */
+ childrc->isParent = (childrte->relkind == RELKIND_PARTITIONED_TABLE);
+
+ /* Include child's rowmark type in top parent's allMarkTypes */
+ top_parentrc->allMarkTypes |= childrc->allMarkTypes;
+
+ root->rowMarks = lappend(root->rowMarks, childrc);
+ }
+
+ /*
+ * If we are creating a child of the query target relation (only possible
+ * in UPDATE/DELETE/MERGE), add it to all_result_relids, as well as
+ * leaf_result_relids if appropriate, and make sure that we generate
+ * required row-identity data.
+ */
+ if (bms_is_member(parentRTindex, root->all_result_relids))
+ {
+ /* OK, record the child as a result rel too. */
+ root->all_result_relids = bms_add_member(root->all_result_relids,
+ childRTindex);
+
+ /* Non-leaf partitions don't need any row identity info. */
+ if (childrte->relkind != RELKIND_PARTITIONED_TABLE)
+ {
+ Var *rrvar;
+
+ root->leaf_result_relids = bms_add_member(root->leaf_result_relids,
+ childRTindex);
+
+ /*
+ * If we have any child target relations, assume they all need to
+ * generate a junk "tableoid" column. (If only one child survives
+ * pruning, we wouldn't really need this, but it's not worth
+ * thrashing about to avoid it.)
+ */
+ rrvar = makeVar(childRTindex,
+ TableOidAttributeNumber,
+ OIDOID,
+ -1,
+ InvalidOid,
+ 0);
+ add_row_identity_var(root, rrvar, childRTindex, "tableoid");
+
+ /* Register any row-identity columns needed by this child. */
+ add_row_identity_columns(root, childRTindex,
+ childrte, childrel);
+ }
+ }
+}
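+
+/*
+ * To make the level-by-level expansion described above concrete, suppose
+ * (hypothetically) that table p is partitioned into p1, and p1 is in turn
+ * partitioned into p11.  Expansion then creates one AppendRelInfo linking
+ * p -> p1 and another linking p1 -> p11; there is no direct p -> p11
+ * entry.  Code that needs a root-to-leaf mapping (for example,
+ * translate_col_privs_multilevel() below) therefore walks this chain one
+ * parent at a time via root->append_rel_array.
+ */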
+
+/*
+ * get_rel_all_updated_cols
+ * Returns the set of columns of a given "simple" relation that are
+ * updated by this query.
+ */
+Bitmapset *
+get_rel_all_updated_cols(PlannerInfo *root, RelOptInfo *rel)
+{
+ Index relid;
+ RangeTblEntry *rte;
+ Bitmapset *updatedCols,
+ *extraUpdatedCols;
+
+ Assert(root->parse->commandType == CMD_UPDATE);
+ Assert(IS_SIMPLE_REL(rel));
+
+ /*
+ * We obtain updatedCols for the query's result relation. Then, if
+ * necessary, we map it to the column numbers of the relation for which
+ * they were requested.
+ */
+ relid = root->parse->resultRelation;
+ rte = planner_rt_fetch(relid, root);
+
+ updatedCols = rte->updatedCols;
+
+ if (rel->relid != relid)
+ {
+ RelOptInfo *top_parent_rel = find_base_rel(root, relid);
+
+ Assert(IS_OTHER_REL(rel));
+
+ updatedCols = translate_col_privs_multilevel(root, rel, top_parent_rel,
+ updatedCols);
+ }
+
+ /*
+ * Now we must check to see if there are any generated columns that depend
+ * on the updatedCols, and add them to the result.
+ */
+ extraUpdatedCols = get_dependent_generated_columns(root, rel->relid,
+ updatedCols);
+
+ return bms_union(updatedCols, extraUpdatedCols);
+}
+
+/*
+ * translate_col_privs
+ * Translate a bitmapset representing per-column privileges from the
+ * parent rel's attribute numbering to the child's.
+ *
+ * The only surprise here is that we don't translate a parent whole-row
+ * reference into a child whole-row reference. That would mean requiring
+ * permissions on all child columns, which is overly strict, since the
+ * query is really only going to reference the inherited columns. Instead
+ * we set the per-column bits for all inherited columns.
+ */
+static Bitmapset *
+translate_col_privs(const Bitmapset *parent_privs,
+ List *translated_vars)
+{
+ Bitmapset *child_privs = NULL;
+ bool whole_row;
+ int attno;
+ ListCell *lc;
+
+ /* System attributes have the same numbers in all tables */
+ for (attno = FirstLowInvalidHeapAttributeNumber + 1; attno < 0; attno++)
+ {
+ if (bms_is_member(attno - FirstLowInvalidHeapAttributeNumber,
+ parent_privs))
+ child_privs = bms_add_member(child_privs,
+ attno - FirstLowInvalidHeapAttributeNumber);
+ }
+
+ /* Check if parent has whole-row reference */
+ whole_row = bms_is_member(InvalidAttrNumber - FirstLowInvalidHeapAttributeNumber,
+ parent_privs);
+
+ /* And now translate the regular user attributes, using the vars list */
+ attno = InvalidAttrNumber;
+ foreach(lc, translated_vars)
+ {
+ Var *var = lfirst_node(Var, lc);
+
+ attno++;
+ if (var == NULL) /* ignore dropped columns */
+ continue;
+ if (whole_row ||
+ bms_is_member(attno - FirstLowInvalidHeapAttributeNumber,
+ parent_privs))
+ child_privs = bms_add_member(child_privs,
+ var->varattno - FirstLowInvalidHeapAttributeNumber);
+ }
+
+ return child_privs;
+}
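+
+/*
+ * For illustration (hypothetical tables): if the parent has columns
+ * (a, b, c) and a child happens to store them as (b, c, a), then
+ * translated_vars maps parent attnums 1,2,3 to child attnums 3,1,2.
+ * A parent bitmap selecting column "b" (parent attnum 2) therefore comes
+ * out as a child bitmap selecting child attnum 1, and a parent whole-row
+ * bit comes out as per-column bits on all three inherited child columns.
+ * (Bitmap members are offset by FirstLowInvalidHeapAttributeNumber, as
+ * elsewhere.)
+ */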
+
+/*
+ * translate_col_privs_multilevel
+ * Recursively translates the column numbers contained in 'parent_cols'
+ * to the column numbers of a descendant relation given by 'rel'
+ *
+ * Note that because this is based on translate_col_privs, it will expand
+ * a whole-row reference into all inherited columns. This is not an issue
+ * for current usages, but beware.
+ */
+static Bitmapset *
+translate_col_privs_multilevel(PlannerInfo *root, RelOptInfo *rel,
+ RelOptInfo *parent_rel,
+ Bitmapset *parent_cols)
+{
+ AppendRelInfo *appinfo;
+
+ /* Fast path for easy case. */
+ if (parent_cols == NULL)
+ return NULL;
+
+ Assert(root->append_rel_array != NULL);
+ appinfo = root->append_rel_array[rel->relid];
+ Assert(appinfo != NULL);
+
+ /* Recurse if immediate parent is not the top parent. */
+ if (appinfo->parent_relid != parent_rel->relid)
+ {
+ RelOptInfo *next_parent = find_base_rel(root, appinfo->parent_relid);
+
+ parent_cols = translate_col_privs_multilevel(root, next_parent,
+ parent_rel,
+ parent_cols);
+ }
+
+ /* Now translate for this child. */
+ return translate_col_privs(parent_cols, appinfo->translated_vars);
+}
+
+/*
+ * expand_appendrel_subquery
+ * Add "other rel" RelOptInfos for the children of an appendrel baserel
+ *
+ * "rel" is a subquery relation that has the rte->inh flag set, meaning it
+ * is a UNION ALL subquery that's been flattened into an appendrel, with
+ * child subqueries listed in root->append_rel_list. We need to build
+ * a RelOptInfo for each child relation so that we can plan scans on them.
+ */
+static void
+expand_appendrel_subquery(PlannerInfo *root, RelOptInfo *rel,
+ RangeTblEntry *rte, Index rti)
+{
+ ListCell *l;
+
+ foreach(l, root->append_rel_list)
+ {
+ AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(l);
+ Index childRTindex = appinfo->child_relid;
+ RangeTblEntry *childrte;
+ RelOptInfo *childrel;
+
+ /* append_rel_list contains all append rels; ignore others */
+ if (appinfo->parent_relid != rti)
+ continue;
+
+ /* find the child RTE, which should already exist */
+ Assert(childRTindex < root->simple_rel_array_size);
+ childrte = root->simple_rte_array[childRTindex];
+ Assert(childrte != NULL);
+
+ /* Build the child RelOptInfo. */
+ childrel = build_simple_rel(root, childRTindex, rel);
+
+ /* Child may itself be an inherited rel, either table or subquery. */
+ if (childrte->inh)
+ expand_inherited_rtentry(root, childrel, childrte, childRTindex);
+ }
+}
+
+
+/*
+ * apply_child_basequals
+ * Populate childrel's base restriction quals from parent rel's quals,
+ * translating them using appinfo.
+ *
+ * If any of the resulting clauses evaluate to constant false or NULL, we
+ * return false and don't apply any quals. Caller should mark the relation as
+ * a dummy rel in this case, since it doesn't need to be scanned.
+ */
+bool
+apply_child_basequals(PlannerInfo *root, RelOptInfo *parentrel,
+ RelOptInfo *childrel, RangeTblEntry *childRTE,
+ AppendRelInfo *appinfo)
+{
+ List *childquals;
+ Index cq_min_security;
+ ListCell *lc;
+
+ /*
+ * The child rel's targetlist might contain non-Var expressions, which
+ * means that substitution into the quals could produce opportunities for
+ * const-simplification, and perhaps even pseudoconstant quals. Therefore,
+ * transform each RestrictInfo separately to see if it reduces to a
+ * constant or pseudoconstant. (We must process them separately to keep
+ * track of the security level of each qual.)
+ */
+ childquals = NIL;
+ cq_min_security = UINT_MAX;
+ foreach(lc, parentrel->baserestrictinfo)
+ {
+ RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
+ Node *childqual;
+ ListCell *lc2;
+
+ Assert(IsA(rinfo, RestrictInfo));
+ childqual = adjust_appendrel_attrs(root,
+ (Node *) rinfo->clause,
+ 1, &appinfo);
+ childqual = eval_const_expressions(root, childqual);
+ /* check for flat-out constant */
+ if (childqual && IsA(childqual, Const))
+ {
+ if (((Const *) childqual)->constisnull ||
+ !DatumGetBool(((Const *) childqual)->constvalue))
+ {
+ /* Restriction reduces to constant FALSE or NULL */
+ return false;
+ }
+ /* Restriction reduces to constant TRUE, so drop it */
+ continue;
+ }
+ /* might have gotten an AND clause, if so flatten it */
+ foreach(lc2, make_ands_implicit((Expr *) childqual))
+ {
+ Node *onecq = (Node *) lfirst(lc2);
+ bool pseudoconstant;
+
+ /* check for pseudoconstant (no Vars or volatile functions) */
+ pseudoconstant =
+ !contain_vars_of_level(onecq, 0) &&
+ !contain_volatile_functions(onecq);
+ if (pseudoconstant)
+ {
+ /* tell createplan.c to check for gating quals */
+ root->hasPseudoConstantQuals = true;
+ }
+ /* reconstitute RestrictInfo with appropriate properties */
+ childquals = lappend(childquals,
+ make_restrictinfo(root,
+ (Expr *) onecq,
+ rinfo->is_pushed_down,
+ rinfo->outerjoin_delayed,
+ pseudoconstant,
+ rinfo->security_level,
+ NULL, NULL, NULL));
+ /* track minimum security level among child quals */
+ cq_min_security = Min(cq_min_security, rinfo->security_level);
+ }
+ }
+
+ /*
+ * In addition to the quals inherited from the parent, we might have
+ * securityQuals associated with this particular child node. (Currently
+ * this can only happen in appendrels originating from UNION ALL;
+ * inheritance child tables don't have their own securityQuals, see
+ * expand_single_inheritance_child().) Pull any such securityQuals up
+ * into the baserestrictinfo for the child. This is similar to
+ * process_security_barrier_quals() for the parent rel, except that we
+ * can't make any general deductions from such quals, since they don't
+ * hold for the whole appendrel.
+ */
+ if (childRTE->securityQuals)
+ {
+ Index security_level = 0;
+
+ foreach(lc, childRTE->securityQuals)
+ {
+ List *qualset = (List *) lfirst(lc);
+ ListCell *lc2;
+
+ foreach(lc2, qualset)
+ {
+ Expr *qual = (Expr *) lfirst(lc2);
+
+ /* not likely that we'd see constants here, so no check */
+ childquals = lappend(childquals,
+ make_restrictinfo(root, qual,
+ true, false, false,
+ security_level,
+ NULL, NULL, NULL));
+ cq_min_security = Min(cq_min_security, security_level);
+ }
+ security_level++;
+ }
+ Assert(security_level <= root->qual_security_level);
+ }
+
+ /*
+ * OK, we've got all the baserestrictinfo quals for this child.
+ */
+ childrel->baserestrictinfo = childquals;
+ childrel->baserestrict_min_security = cq_min_security;
+
+ return true;
+}
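+
+/*
+ * A hypothetical UNION ALL example of the constant-folding above:
+ *		SELECT * FROM (SELECT x FROM t1 UNION ALL SELECT 1 AS x FROM t2) ss
+ *		WHERE ss.x > 5;
+ * The UNION ALL is flattened into an appendrel, and for the second child
+ * the parent qual "x > 5" becomes "1 > 5" after translation, which
+ * eval_const_expressions() reduces to constant FALSE.  This function then
+ * returns false and the caller marks that child as a dummy rel.
+ */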
diff --git a/src/backend/optimizer/util/joininfo.c b/src/backend/optimizer/util/joininfo.c
new file mode 100644
index 0000000..d4cffdb
--- /dev/null
+++ b/src/backend/optimizer/util/joininfo.c
@@ -0,0 +1,140 @@
+/*-------------------------------------------------------------------------
+ *
+ * joininfo.c
+ * joininfo list manipulation routines
+ *
+ * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ * src/backend/optimizer/util/joininfo.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "optimizer/joininfo.h"
+#include "optimizer/pathnode.h"
+#include "optimizer/paths.h"
+
+
+/*
+ * have_relevant_joinclause
+ * Detect whether there is a joinclause that involves
+ * the two given relations.
+ *
+ * Note: the joinclause does not have to be evaluable with only these two
+ * relations. This is intentional. For example consider
+ * SELECT * FROM a, b, c WHERE a.x = (b.y + c.z)
+ * If a is much larger than the other tables, it may be worthwhile to
+ * cross-join b and c and then use an inner indexscan on a.x. Therefore
+ * we should consider this joinclause as reason to join b to c, even though
+ * it can't be applied at that join step.
+ */
+bool
+have_relevant_joinclause(PlannerInfo *root,
+ RelOptInfo *rel1, RelOptInfo *rel2)
+{
+ bool result = false;
+ List *joininfo;
+ Relids other_relids;
+ ListCell *l;
+
+ /*
+ * We could scan either relation's joininfo list; may as well use the
+ * shorter one.
+ */
+ if (list_length(rel1->joininfo) <= list_length(rel2->joininfo))
+ {
+ joininfo = rel1->joininfo;
+ other_relids = rel2->relids;
+ }
+ else
+ {
+ joininfo = rel2->joininfo;
+ other_relids = rel1->relids;
+ }
+
+ foreach(l, joininfo)
+ {
+ RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
+
+ if (bms_overlap(other_relids, rinfo->required_relids))
+ {
+ result = true;
+ break;
+ }
+ }
+
+ /*
+ * We also need to check the EquivalenceClass data structure, which might
+ * contain relationships not emitted into the joininfo lists.
+ */
+ if (!result && rel1->has_eclass_joins && rel2->has_eclass_joins)
+ result = have_relevant_eclass_joinclause(root, rel1, rel2);
+
+ return result;
+}
+
+
+/*
+ * add_join_clause_to_rels
+ * Add 'restrictinfo' to the joininfo list of each relation it requires.
+ *
+ * Note that the same copy of the restrictinfo node is linked to by all the
+ * lists it is in. This allows us to exploit caching of information about
+ * the restriction clause (but we must be careful that the information does
+ * not depend on context).
+ *
+ * 'restrictinfo' describes the join clause
+ * 'join_relids' is the set of relations participating in the join clause
+ * (there must be more than one)
+ */
+void
+add_join_clause_to_rels(PlannerInfo *root,
+ RestrictInfo *restrictinfo,
+ Relids join_relids)
+{
+ int cur_relid;
+
+ cur_relid = -1;
+ while ((cur_relid = bms_next_member(join_relids, cur_relid)) >= 0)
+ {
+ RelOptInfo *rel = find_base_rel(root, cur_relid);
+
+ rel->joininfo = lappend(rel->joininfo, restrictinfo);
+ }
+}
+
+/*
+ * remove_join_clause_from_rels
+ * Delete 'restrictinfo' from all the joininfo lists it is in
+ *
+ * This reverses the effect of add_join_clause_to_rels. It's used when we
+ * discover that a relation need not be joined at all.
+ *
+ * 'restrictinfo' describes the join clause
+ * 'join_relids' is the set of relations participating in the join clause
+ * (there must be more than one)
+ */
+void
+remove_join_clause_from_rels(PlannerInfo *root,
+ RestrictInfo *restrictinfo,
+ Relids join_relids)
+{
+ int cur_relid;
+
+ cur_relid = -1;
+ while ((cur_relid = bms_next_member(join_relids, cur_relid)) >= 0)
+ {
+ RelOptInfo *rel = find_base_rel(root, cur_relid);
+
+ /*
+ * Remove the restrictinfo from the list. Pointer comparison is
+ * sufficient.
+ */
+ Assert(list_member_ptr(rel->joininfo, restrictinfo));
+ rel->joininfo = list_delete_ptr(rel->joininfo, restrictinfo);
+ }
+}
diff --git a/src/backend/optimizer/util/orclauses.c b/src/backend/optimizer/util/orclauses.c
new file mode 100644
index 0000000..b1363df
--- /dev/null
+++ b/src/backend/optimizer/util/orclauses.c
@@ -0,0 +1,360 @@
+/*-------------------------------------------------------------------------
+ *
+ * orclauses.c
+ * Routines to extract restriction OR clauses from join OR clauses
+ *
+ * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ * src/backend/optimizer/util/orclauses.c
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include "nodes/makefuncs.h"
+#include "nodes/nodeFuncs.h"
+#include "optimizer/clauses.h"
+#include "optimizer/cost.h"
+#include "optimizer/optimizer.h"
+#include "optimizer/orclauses.h"
+#include "optimizer/restrictinfo.h"
+
+
+static bool is_safe_restriction_clause_for(RestrictInfo *rinfo, RelOptInfo *rel);
+static Expr *extract_or_clause(RestrictInfo *or_rinfo, RelOptInfo *rel);
+static void consider_new_or_clause(PlannerInfo *root, RelOptInfo *rel,
+ Expr *orclause, RestrictInfo *join_or_rinfo);
+
+
+/*
+ * extract_restriction_or_clauses
+ * Examine join OR-of-AND clauses to see if any useful restriction OR
+ * clauses can be extracted. If so, add them to the query.
+ *
+ * Although a join clause must reference multiple relations overall,
+ * an OR of ANDs clause might contain sub-clauses that reference just one
+ * relation and can be used to build a restriction clause for that rel.
+ * For example consider
+ * WHERE ((a.x = 42 AND b.y = 43) OR (a.x = 44 AND b.z = 45));
+ * We can transform this into
+ * WHERE ((a.x = 42 AND b.y = 43) OR (a.x = 44 AND b.z = 45))
+ * AND (a.x = 42 OR a.x = 44)
+ * AND (b.y = 43 OR b.z = 45);
+ * which allows the latter clauses to be applied during the scans of a and b,
+ * perhaps as index qualifications, and in any case reducing the number of
+ * rows arriving at the join. In essence this is a partial transformation to
+ * CNF (AND of ORs format). It is not complete, however, because we do not
+ * unravel the original OR --- doing so would usually bloat the qualification
+ * expression to little gain.
+ *
+ * The added quals are partially redundant with the original OR, and therefore
+ * would cause the size of the joinrel to be underestimated when it is finally
+ * formed. (This would be true of a full transformation to CNF as well; the
+ * fault is not really in the transformation, but in clauselist_selectivity's
+ * inability to recognize redundant conditions.) We can compensate for this
+ * redundancy by changing the cached selectivity of the original OR clause,
+ * canceling out the (valid) reduction in the estimated sizes of the base
+ * relations so that the estimated joinrel size remains the same. This is
+ * a MAJOR HACK: it depends on the fact that clause selectivities are cached
+ * and on the fact that the same RestrictInfo node will appear in every
+ * joininfo list that might be used when the joinrel is formed.
+ * And it doesn't work in cases where the size estimation is nonlinear
+ * (i.e., outer and IN joins). But it beats not doing anything.
+ *
+ * We examine each base relation to see if join clauses associated with it
+ * contain extractable restriction conditions. If so, add those conditions
+ * to the rel's baserestrictinfo and update the cached selectivities of the
+ * join clauses. Note that the same join clause will be examined afresh
+ * from the point of view of each baserel that participates in it, so its
+ * cached selectivity may get updated multiple times.
+ */
+void
+extract_restriction_or_clauses(PlannerInfo *root)
+{
+ Index rti;
+
+ /* Examine each baserel for potential join OR clauses */
+ for (rti = 1; rti < root->simple_rel_array_size; rti++)
+ {
+ RelOptInfo *rel = root->simple_rel_array[rti];
+ ListCell *lc;
+
+ /* there may be empty slots corresponding to non-baserel RTEs */
+ if (rel == NULL)
+ continue;
+
+ Assert(rel->relid == rti); /* sanity check on array */
+
+ /* ignore RTEs that are "other rels" */
+ if (rel->reloptkind != RELOPT_BASEREL)
+ continue;
+
+ /*
+ * Find potentially interesting OR joinclauses. We can use any
+ * joinclause that is considered safe to move to this rel by the
+ * parameterized-path machinery, even though what we are going to do
+ * with it is not exactly a parameterized path.
+ *
+ * However, it seems best to ignore clauses that have been marked
+ * redundant (by setting norm_selec > 1). That likely can't happen
+ * for OR clauses, but let's be safe.
+ */
+ foreach(lc, rel->joininfo)
+ {
+ RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
+
+ if (restriction_is_or_clause(rinfo) &&
+ join_clause_is_movable_to(rinfo, rel) &&
+ rinfo->norm_selec <= 1)
+ {
+ /* Try to extract a qual for this rel only */
+ Expr *orclause = extract_or_clause(rinfo, rel);
+
+ /*
+ * If successful, decide whether we want to use the clause,
+ * and insert it into the rel's restrictinfo list if so.
+ */
+ if (orclause)
+ consider_new_or_clause(root, rel, orclause, rinfo);
+ }
+ }
+ }
+}
+
+/*
+ * Is the given primitive (non-OR) RestrictInfo safe to move to the rel?
+ */
+static bool
+is_safe_restriction_clause_for(RestrictInfo *rinfo, RelOptInfo *rel)
+{
+ /*
+ * We want clauses that mention the rel, and only the rel. So in
+ * particular pseudoconstant clauses can be rejected quickly. Then check
+ * the clause's Var membership.
+ */
+ if (rinfo->pseudoconstant)
+ return false;
+ if (!bms_equal(rinfo->clause_relids, rel->relids))
+ return false;
+
+ /* We don't want extra evaluations of any volatile functions */
+ if (contain_volatile_functions((Node *) rinfo->clause))
+ return false;
+
+ return true;
+}
+
+/*
+ * Try to extract a restriction clause mentioning only "rel" from the given
+ * join OR-clause.
+ *
+ * We must be able to extract at least one qual for this rel from each of
+ * the arms of the OR, else we can't use it.
+ *
+ * Returns an OR clause (not a RestrictInfo!) pertaining to rel, or NULL
+ * if no OR clause could be extracted.
+ */
+static Expr *
+extract_or_clause(RestrictInfo *or_rinfo, RelOptInfo *rel)
+{
+ List *clauselist = NIL;
+ ListCell *lc;
+
+ /*
+ * Scan each arm of the input OR clause. Notice we descend into
+ * or_rinfo->orclause, which has RestrictInfo nodes embedded below the
+ * toplevel OR/AND structure. This is useful because we can use the info
+ * in those nodes to make is_safe_restriction_clause_for()'s checks
+ * cheaper. We'll strip those nodes from the returned tree, though,
+ * meaning that fresh ones will be built if the clause is accepted as a
+ * restriction clause. This might seem wasteful --- couldn't we re-use
+ * the existing RestrictInfos? But that'd require assuming that
+ * selectivity and other cached data is computed exactly the same way for
+ * a restriction clause as for a join clause, which seems undesirable.
+ */
+ Assert(is_orclause(or_rinfo->orclause));
+ foreach(lc, ((BoolExpr *) or_rinfo->orclause)->args)
+ {
+ Node *orarg = (Node *) lfirst(lc);
+ List *subclauses = NIL;
+ Node *subclause;
+
+ /* OR arguments should be ANDs or sub-RestrictInfos */
+ if (is_andclause(orarg))
+ {
+ List *andargs = ((BoolExpr *) orarg)->args;
+ ListCell *lc2;
+
+ foreach(lc2, andargs)
+ {
+ RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc2);
+
+ if (restriction_is_or_clause(rinfo))
+ {
+ /*
+					 * Recurse to deal with nested OR.  Note we *must* recurse
+					 * here; this isn't just overly-tense optimization: we
+ * have to descend far enough to find and strip all
+ * RestrictInfos in the expression.
+ */
+ Expr *suborclause;
+
+ suborclause = extract_or_clause(rinfo, rel);
+ if (suborclause)
+ subclauses = lappend(subclauses, suborclause);
+ }
+ else if (is_safe_restriction_clause_for(rinfo, rel))
+ subclauses = lappend(subclauses, rinfo->clause);
+ }
+ }
+ else
+ {
+ RestrictInfo *rinfo = castNode(RestrictInfo, orarg);
+
+ Assert(!restriction_is_or_clause(rinfo));
+ if (is_safe_restriction_clause_for(rinfo, rel))
+ subclauses = lappend(subclauses, rinfo->clause);
+ }
+
+ /*
+ * If nothing could be extracted from this arm, we can't do anything
+ * with this OR clause.
+ */
+ if (subclauses == NIL)
+ return NULL;
+
+ /*
+ * OK, add subclause(s) to the result OR. If we found more than one,
+ * we need an AND node. But if we found only one, and it is itself an
+ * OR node, add its subclauses to the result instead; this is needed
+ * to preserve AND/OR flatness (ie, no OR directly underneath OR).
+ */
+ subclause = (Node *) make_ands_explicit(subclauses);
+ if (is_orclause(subclause))
+ clauselist = list_concat(clauselist,
+ ((BoolExpr *) subclause)->args);
+ else
+ clauselist = lappend(clauselist, subclause);
+ }
+
+ /*
+ * If we got a restriction clause from every arm, wrap them up in an OR
+ * node. (In theory the OR node might be unnecessary, if there was only
+ * one arm --- but then the input OR node was also redundant.)
+ */
+ if (clauselist != NIL)
+ return make_orclause(clauselist);
+ return NULL;
+}
+
+/*
+ * Consider whether a successfully-extracted restriction OR clause is
+ * actually worth using. If so, add it to the planner's data structures,
+ * and adjust the original join clause (join_or_rinfo) to compensate.
+ */
+static void
+consider_new_or_clause(PlannerInfo *root, RelOptInfo *rel,
+ Expr *orclause, RestrictInfo *join_or_rinfo)
+{
+ RestrictInfo *or_rinfo;
+ Selectivity or_selec,
+ orig_selec;
+
+ /*
+ * Build a RestrictInfo from the new OR clause. We can assume it's valid
+ * as a base restriction clause.
+ */
+ or_rinfo = make_restrictinfo(root,
+ orclause,
+ true,
+ false,
+ false,
+ join_or_rinfo->security_level,
+ NULL,
+ NULL,
+ NULL);
+
+ /*
+ * Estimate its selectivity. (We could have done this earlier, but doing
+ * it on the RestrictInfo representation allows the result to get cached,
+ * saving work later.)
+ */
+ or_selec = clause_selectivity(root, (Node *) or_rinfo,
+ 0, JOIN_INNER, NULL);
+
+ /*
+ * The clause is only worth adding to the query if it rejects a useful
+ * fraction of the base relation's rows; otherwise, it's just going to
+ * cause duplicate computation (since we will still have to check the
+ * original OR clause when the join is formed). Somewhat arbitrarily, we
+ * set the selectivity threshold at 0.9.
+ */
+ if (or_selec > 0.9)
+ return; /* forget it */
+
+ /*
+ * OK, add it to the rel's restriction-clause list.
+ */
+ rel->baserestrictinfo = lappend(rel->baserestrictinfo, or_rinfo);
+ rel->baserestrict_min_security = Min(rel->baserestrict_min_security,
+ or_rinfo->security_level);
+
+ /*
+ * Adjust the original join OR clause's cached selectivity to compensate
+ * for the selectivity of the added (but redundant) lower-level qual. This
+ * should result in the join rel getting approximately the same rows
+ * estimate as it would have gotten without all these shenanigans.
+ *
+ * XXX major hack alert: this depends on the assumption that the
+ * selectivity will stay cached.
+ *
+ * XXX another major hack: we adjust only norm_selec, the cached
+ * selectivity for JOIN_INNER semantics, even though the join clause
+ * might've been an outer-join clause. This is partly because we can't
+ * easily identify the relevant SpecialJoinInfo here, and partly because
+ * the linearity assumption we're making would fail anyway. (If it is an
+ * outer-join clause, "rel" must be on the nullable side, else we'd not
+ * have gotten here. So the computation of the join size is going to be
+ * quite nonlinear with respect to the size of "rel", so it's not clear
+ * how we ought to adjust outer_selec even if we could compute its
+ * original value correctly.)
+ */
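+	/*
+	 * A worked (hypothetical) example of the compensation: suppose the
+	 * extracted single-rel OR clause has or_selec = 0.5 and the original
+	 * join OR clause has inner-join selectivity orig_selec = 0.05.  We then
+	 * cache norm_selec = 0.05 / 0.5 = 0.1, so the base relation's rowcount
+	 * (already cut in half by the new qual) times the adjusted join
+	 * selectivity still yields roughly the original joinrel size estimate.
+	 */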
+ if (or_selec > 0)
+ {
+ SpecialJoinInfo sjinfo;
+
+ /*
+ * Make up a SpecialJoinInfo for JOIN_INNER semantics. (Compare
+ * approx_tuple_count() in costsize.c.)
+ */
+ sjinfo.type = T_SpecialJoinInfo;
+ sjinfo.min_lefthand = bms_difference(join_or_rinfo->clause_relids,
+ rel->relids);
+ sjinfo.min_righthand = rel->relids;
+ sjinfo.syn_lefthand = sjinfo.min_lefthand;
+ sjinfo.syn_righthand = sjinfo.min_righthand;
+ sjinfo.jointype = JOIN_INNER;
+ /* we don't bother trying to make the remaining fields valid */
+ sjinfo.lhs_strict = false;
+ sjinfo.delay_upper_joins = false;
+ sjinfo.semi_can_btree = false;
+ sjinfo.semi_can_hash = false;
+ sjinfo.semi_operators = NIL;
+ sjinfo.semi_rhs_exprs = NIL;
+
+ /* Compute inner-join size */
+ orig_selec = clause_selectivity(root, (Node *) join_or_rinfo,
+ 0, JOIN_INNER, &sjinfo);
+
+ /* And hack cached selectivity so join size remains the same */
+ join_or_rinfo->norm_selec = orig_selec / or_selec;
+ /* ensure result stays in sane range, in particular not "redundant" */
+ if (join_or_rinfo->norm_selec > 1)
+ join_or_rinfo->norm_selec = 1;
+ /* as explained above, we don't touch outer_selec */
+ }
+}
diff --git a/src/backend/optimizer/util/paramassign.c b/src/backend/optimizer/util/paramassign.c
new file mode 100644
index 0000000..12486cb
--- /dev/null
+++ b/src/backend/optimizer/util/paramassign.c
@@ -0,0 +1,591 @@
+/*-------------------------------------------------------------------------
+ *
+ * paramassign.c
+ * Functions for assigning PARAM_EXEC slots during planning.
+ *
+ * This module is responsible for managing three planner data structures:
+ *
+ * root->glob->paramExecTypes: records actual assignments of PARAM_EXEC slots.
+ * The i'th list element holds the data type OID of the i'th parameter slot.
+ * (Elements can be InvalidOid if they represent slots that are needed for
+ * chgParam signaling, but will never hold a value at runtime.) This list is
+ * global to the whole plan since the executor has only one PARAM_EXEC array.
+ * Assignments are permanent for the plan: we never remove entries once added.
+ *
+ * root->plan_params: a list of PlannerParamItem nodes, recording Vars and
+ * PlaceHolderVars that the root's query level needs to supply to lower-level
+ * subqueries, along with the PARAM_EXEC number to use for each such value.
+ * Elements are added to this list while planning a subquery, and the list
+ * is reset to empty after completion of each subquery.
+ *
+ * root->curOuterParams: a list of NestLoopParam nodes, recording Vars and
+ * PlaceHolderVars that some outer level of nestloop needs to pass down to
+ * a lower-level plan node in its righthand side. Elements are added to this
+ * list as createplan.c creates lower Plan nodes that need such Params, and
+ * are removed when it creates a NestLoop Plan node that will supply those
+ * values.
+ *
+ * The latter two data structures are used to prevent creating multiple
+ * PARAM_EXEC slots (each requiring work to fill) when the same upper
+ * SubPlan or NestLoop supplies a value that is referenced in more than
+ * one place in its child plan nodes. However, when the same Var has to
+ * be supplied to different subplan trees by different SubPlan or NestLoop
+ * parent nodes, we don't recognize any commonality; a fresh plan_params or
+ * curOuterParams entry will be made (since the old one has been removed
+ * when we finished processing the earlier SubPlan or NestLoop) and a fresh
+ * PARAM_EXEC number will be assigned. At one time we tried to avoid
+ * allocating duplicate PARAM_EXEC numbers in such cases, but it's harder
+ * than it seems to avoid bugs due to overlapping Param lifetimes, so we
+ * don't risk that anymore. Minimizing the number of PARAM_EXEC slots
+ * doesn't really save much executor work anyway.
+ *
+ *
+ * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * IDENTIFICATION
+ * src/backend/optimizer/util/paramassign.c
+ *
+ *-------------------------------------------------------------------------
+ */
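+
+/*
+ * As a hypothetical illustration of the structures described above: in
+ *		SELECT * FROM t1
+ *		WHERE t1.x = (SELECT max(y) FROM t2 WHERE t2.z = t1.z);
+ * the subquery's reference to t1.z is replaced by a PARAM_EXEC Param
+ * (see replace_outer_var below).  That assigns the next free slot in
+ * root->glob->paramExecTypes and records a PlannerParamItem for t1.z in
+ * the outer query's plan_params, which the SubPlan machinery later uses
+ * to know which outer value must be supplied at runtime.
+ */
+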
+#include "postgres.h"
+
+#include "nodes/nodeFuncs.h"
+#include "nodes/plannodes.h"
+#include "optimizer/paramassign.h"
+#include "optimizer/placeholder.h"
+#include "rewrite/rewriteManip.h"
+
+
+/*
+ * Select a PARAM_EXEC number to identify the given Var as a parameter for
+ * the current subquery. (It might already have one.)
+ * Record the need for the Var in the proper upper-level root->plan_params.
+ */
+static int
+assign_param_for_var(PlannerInfo *root, Var *var)
+{
+ ListCell *ppl;
+ PlannerParamItem *pitem;
+ Index levelsup;
+
+ /* Find the query level the Var belongs to */
+ for (levelsup = var->varlevelsup; levelsup > 0; levelsup--)
+ root = root->parent_root;
+
+ /* If there's already a matching PlannerParamItem there, just use it */
+ foreach(ppl, root->plan_params)
+ {
+ pitem = (PlannerParamItem *) lfirst(ppl);
+ if (IsA(pitem->item, Var))
+ {
+ Var *pvar = (Var *) pitem->item;
+
+ /*
+ * This comparison must match _equalVar(), except for ignoring
+ * varlevelsup. Note that _equalVar() ignores varnosyn,
+ * varattnosyn, and location, so this does too.
+ */
+ if (pvar->varno == var->varno &&
+ pvar->varattno == var->varattno &&
+ pvar->vartype == var->vartype &&
+ pvar->vartypmod == var->vartypmod &&
+ pvar->varcollid == var->varcollid)
+ return pitem->paramId;
+ }
+ }
+
+ /* Nope, so make a new one */
+ var = copyObject(var);
+ var->varlevelsup = 0;
+
+ pitem = makeNode(PlannerParamItem);
+ pitem->item = (Node *) var;
+ pitem->paramId = list_length(root->glob->paramExecTypes);
+ root->glob->paramExecTypes = lappend_oid(root->glob->paramExecTypes,
+ var->vartype);
+
+ root->plan_params = lappend(root->plan_params, pitem);
+
+ return pitem->paramId;
+}
+
+/*
+ * Generate a Param node to replace the given Var,
+ * which is expected to have varlevelsup > 0 (ie, it is not local).
+ * Record the need for the Var in the proper upper-level root->plan_params.
+ */
+Param *
+replace_outer_var(PlannerInfo *root, Var *var)
+{
+ Param *retval;
+ int i;
+
+ Assert(var->varlevelsup > 0 && var->varlevelsup < root->query_level);
+
+ /* Find the Var in the appropriate plan_params, or add it if not present */
+ i = assign_param_for_var(root, var);
+
+ retval = makeNode(Param);
+ retval->paramkind = PARAM_EXEC;
+ retval->paramid = i;
+ retval->paramtype = var->vartype;
+ retval->paramtypmod = var->vartypmod;
+ retval->paramcollid = var->varcollid;
+ retval->location = var->location;
+
+ return retval;
+}
+
+/*
+ * Select a PARAM_EXEC number to identify the given PlaceHolderVar as a
+ * parameter for the current subquery. (It might already have one.)
+ * Record the need for the PHV in the proper upper-level root->plan_params.
+ *
+ * This is just like assign_param_for_var, except for PlaceHolderVars.
+ */
+static int
+assign_param_for_placeholdervar(PlannerInfo *root, PlaceHolderVar *phv)
+{
+ ListCell *ppl;
+ PlannerParamItem *pitem;
+ Index levelsup;
+
+ /* Find the query level the PHV belongs to */
+ for (levelsup = phv->phlevelsup; levelsup > 0; levelsup--)
+ root = root->parent_root;
+
+ /* If there's already a matching PlannerParamItem there, just use it */
+ foreach(ppl, root->plan_params)
+ {
+ pitem = (PlannerParamItem *) lfirst(ppl);
+ if (IsA(pitem->item, PlaceHolderVar))
+ {
+ PlaceHolderVar *pphv = (PlaceHolderVar *) pitem->item;
+
+ /* We assume comparing the PHIDs is sufficient */
+ if (pphv->phid == phv->phid)
+ return pitem->paramId;
+ }
+ }
+
+ /* Nope, so make a new one */
+ phv = copyObject(phv);
+ IncrementVarSublevelsUp((Node *) phv, -((int) phv->phlevelsup), 0);
+ Assert(phv->phlevelsup == 0);
+
+ pitem = makeNode(PlannerParamItem);
+ pitem->item = (Node *) phv;
+ pitem->paramId = list_length(root->glob->paramExecTypes);
+ root->glob->paramExecTypes = lappend_oid(root->glob->paramExecTypes,
+ exprType((Node *) phv->phexpr));
+
+ root->plan_params = lappend(root->plan_params, pitem);
+
+ return pitem->paramId;
+}
+
+/*
+ * Generate a Param node to replace the given PlaceHolderVar,
+ * which is expected to have phlevelsup > 0 (ie, it is not local).
+ * Record the need for the PHV in the proper upper-level root->plan_params.
+ *
+ * This is just like replace_outer_var, except for PlaceHolderVars.
+ */
+Param *
+replace_outer_placeholdervar(PlannerInfo *root, PlaceHolderVar *phv)
+{
+ Param *retval;
+ int i;
+
+ Assert(phv->phlevelsup > 0 && phv->phlevelsup < root->query_level);
+
+ /* Find the PHV in the appropriate plan_params, or add it if not present */
+ i = assign_param_for_placeholdervar(root, phv);
+
+ retval = makeNode(Param);
+ retval->paramkind = PARAM_EXEC;
+ retval->paramid = i;
+ retval->paramtype = exprType((Node *) phv->phexpr);
+ retval->paramtypmod = exprTypmod((Node *) phv->phexpr);
+ retval->paramcollid = exprCollation((Node *) phv->phexpr);
+ retval->location = -1;
+
+ return retval;
+}
+
+/*
+ * Generate a Param node to replace the given Aggref
+ * which is expected to have agglevelsup > 0 (ie, it is not local).
+ * Record the need for the Aggref in the proper upper-level root->plan_params.
+ */
+Param *
+replace_outer_agg(PlannerInfo *root, Aggref *agg)
+{
+ Param *retval;
+ PlannerParamItem *pitem;
+ Index levelsup;
+
+ Assert(agg->agglevelsup > 0 && agg->agglevelsup < root->query_level);
+
+ /* Find the query level the Aggref belongs to */
+ for (levelsup = agg->agglevelsup; levelsup > 0; levelsup--)
+ root = root->parent_root;
+
+ /*
+ * It does not seem worthwhile to try to de-duplicate references to outer
+ * aggs. Just make a new slot every time.
+ */
+ agg = copyObject(agg);
+ IncrementVarSublevelsUp((Node *) agg, -((int) agg->agglevelsup), 0);
+ Assert(agg->agglevelsup == 0);
+
+ pitem = makeNode(PlannerParamItem);
+ pitem->item = (Node *) agg;
+ pitem->paramId = list_length(root->glob->paramExecTypes);
+ root->glob->paramExecTypes = lappend_oid(root->glob->paramExecTypes,
+ agg->aggtype);
+
+ root->plan_params = lappend(root->plan_params, pitem);
+
+ retval = makeNode(Param);
+ retval->paramkind = PARAM_EXEC;
+ retval->paramid = pitem->paramId;
+ retval->paramtype = agg->aggtype;
+ retval->paramtypmod = -1;
+ retval->paramcollid = agg->aggcollid;
+ retval->location = agg->location;
+
+ return retval;
+}
+
+/*
+ * Generate a Param node to replace the given GroupingFunc expression which is
+ * expected to have agglevelsup > 0 (ie, it is not local).
+ * Record the need for the GroupingFunc in the proper upper-level
+ * root->plan_params.
+ */
+Param *
+replace_outer_grouping(PlannerInfo *root, GroupingFunc *grp)
+{
+ Param *retval;
+ PlannerParamItem *pitem;
+ Index levelsup;
+ Oid ptype = exprType((Node *) grp);
+
+ Assert(grp->agglevelsup > 0 && grp->agglevelsup < root->query_level);
+
+ /* Find the query level the GroupingFunc belongs to */
+ for (levelsup = grp->agglevelsup; levelsup > 0; levelsup--)
+ root = root->parent_root;
+
+ /*
+	 * It does not seem worthwhile to try to de-duplicate references to outer
+	 * GroupingFuncs. Just make a new slot every time.
+ */
+ grp = copyObject(grp);
+ IncrementVarSublevelsUp((Node *) grp, -((int) grp->agglevelsup), 0);
+ Assert(grp->agglevelsup == 0);
+
+ pitem = makeNode(PlannerParamItem);
+ pitem->item = (Node *) grp;
+ pitem->paramId = list_length(root->glob->paramExecTypes);
+ root->glob->paramExecTypes = lappend_oid(root->glob->paramExecTypes,
+ ptype);
+
+ root->plan_params = lappend(root->plan_params, pitem);
+
+ retval = makeNode(Param);
+ retval->paramkind = PARAM_EXEC;
+ retval->paramid = pitem->paramId;
+ retval->paramtype = ptype;
+ retval->paramtypmod = -1;
+ retval->paramcollid = InvalidOid;
+ retval->location = grp->location;
+
+ return retval;
+}
+
+/*
+ * Generate a Param node to replace the given Var,
+ * which is expected to come from some upper NestLoop plan node.
+ * Record the need for the Var in root->curOuterParams.
+ */
+Param *
+replace_nestloop_param_var(PlannerInfo *root, Var *var)
+{
+ Param *param;
+ NestLoopParam *nlp;
+ ListCell *lc;
+
+ /* Is this Var already listed in root->curOuterParams? */
+ foreach(lc, root->curOuterParams)
+ {
+ nlp = (NestLoopParam *) lfirst(lc);
+ if (equal(var, nlp->paramval))
+ {
+ /* Yes, so just make a Param referencing this NLP's slot */
+ param = makeNode(Param);
+ param->paramkind = PARAM_EXEC;
+ param->paramid = nlp->paramno;
+ param->paramtype = var->vartype;
+ param->paramtypmod = var->vartypmod;
+ param->paramcollid = var->varcollid;
+ param->location = var->location;
+ return param;
+ }
+ }
+
+ /* No, so assign a PARAM_EXEC slot for a new NLP */
+ param = generate_new_exec_param(root,
+ var->vartype,
+ var->vartypmod,
+ var->varcollid);
+ param->location = var->location;
+
+ /* Add it to the list of required NLPs */
+ nlp = makeNode(NestLoopParam);
+ nlp->paramno = param->paramid;
+ nlp->paramval = copyObject(var);
+ root->curOuterParams = lappend(root->curOuterParams, nlp);
+
+ /* And return the replacement Param */
+ return param;
+}
+
+/*
+ * Generate a Param node to replace the given PlaceHolderVar,
+ * which is expected to come from some upper NestLoop plan node.
+ * Record the need for the PHV in root->curOuterParams.
+ *
+ * This is just like replace_nestloop_param_var, except for PlaceHolderVars.
+ */
+Param *
+replace_nestloop_param_placeholdervar(PlannerInfo *root, PlaceHolderVar *phv)
+{
+ Param *param;
+ NestLoopParam *nlp;
+ ListCell *lc;
+
+ /* Is this PHV already listed in root->curOuterParams? */
+ foreach(lc, root->curOuterParams)
+ {
+ nlp = (NestLoopParam *) lfirst(lc);
+ if (equal(phv, nlp->paramval))
+ {
+ /* Yes, so just make a Param referencing this NLP's slot */
+ param = makeNode(Param);
+ param->paramkind = PARAM_EXEC;
+ param->paramid = nlp->paramno;
+ param->paramtype = exprType((Node *) phv->phexpr);
+ param->paramtypmod = exprTypmod((Node *) phv->phexpr);
+ param->paramcollid = exprCollation((Node *) phv->phexpr);
+ param->location = -1;
+ return param;
+ }
+ }
+
+ /* No, so assign a PARAM_EXEC slot for a new NLP */
+ param = generate_new_exec_param(root,
+ exprType((Node *) phv->phexpr),
+ exprTypmod((Node *) phv->phexpr),
+ exprCollation((Node *) phv->phexpr));
+
+ /* Add it to the list of required NLPs */
+ nlp = makeNode(NestLoopParam);
+ nlp->paramno = param->paramid;
+ nlp->paramval = (Var *) copyObject(phv);
+ root->curOuterParams = lappend(root->curOuterParams, nlp);
+
+ /* And return the replacement Param */
+ return param;
+}
+
+/*
+ * process_subquery_nestloop_params
+ * Handle params of a parameterized subquery that need to be fed
+ * from an outer nestloop.
+ *
+ * Currently, that would be *all* params that a subquery in FROM has demanded
+ * from the current query level, since they must be LATERAL references.
+ *
+ * subplan_params is a list of PlannerParamItems that we intend to pass to
+ * a subquery-in-FROM. (This was constructed in root->plan_params while
+ * planning the subquery, but isn't there anymore when this is called.)
+ *
+ * The subplan's references to the outer variables are already represented
+ * as PARAM_EXEC Params, since that conversion was done by the routines above
+ * while planning the subquery. So we need not modify the subplan or the
+ * PlannerParamItems here. What we do need to do is add entries to
+ * root->curOuterParams to signal the parent nestloop plan node that it must
+ * provide these values. This differs from replace_nestloop_param_var in
+ * that the PARAM_EXEC slots to use have already been determined.
+ *
+ * Note that we also use root->curOuterRels as an implicit parameter for
+ * sanity checks.
+ */
+void
+process_subquery_nestloop_params(PlannerInfo *root, List *subplan_params)
+{
+ ListCell *lc;
+
+ foreach(lc, subplan_params)
+ {
+ PlannerParamItem *pitem = lfirst_node(PlannerParamItem, lc);
+
+ if (IsA(pitem->item, Var))
+ {
+ Var *var = (Var *) pitem->item;
+ NestLoopParam *nlp;
+ ListCell *lc;
+
+ /* If not from a nestloop outer rel, complain */
+ if (!bms_is_member(var->varno, root->curOuterRels))
+ elog(ERROR, "non-LATERAL parameter required by subquery");
+
+ /* Is this param already listed in root->curOuterParams? */
+ foreach(lc, root->curOuterParams)
+ {
+ nlp = (NestLoopParam *) lfirst(lc);
+ if (nlp->paramno == pitem->paramId)
+ {
+ Assert(equal(var, nlp->paramval));
+ /* Present, so nothing to do */
+ break;
+ }
+ }
+ if (lc == NULL)
+ {
+ /* No, so add it */
+ nlp = makeNode(NestLoopParam);
+ nlp->paramno = pitem->paramId;
+ nlp->paramval = copyObject(var);
+ root->curOuterParams = lappend(root->curOuterParams, nlp);
+ }
+ }
+ else if (IsA(pitem->item, PlaceHolderVar))
+ {
+ PlaceHolderVar *phv = (PlaceHolderVar *) pitem->item;
+ NestLoopParam *nlp;
+ ListCell *lc;
+
+ /* If not from a nestloop outer rel, complain */
+ if (!bms_is_subset(find_placeholder_info(root, phv, false)->ph_eval_at,
+ root->curOuterRels))
+ elog(ERROR, "non-LATERAL parameter required by subquery");
+
+ /* Is this param already listed in root->curOuterParams? */
+ foreach(lc, root->curOuterParams)
+ {
+ nlp = (NestLoopParam *) lfirst(lc);
+ if (nlp->paramno == pitem->paramId)
+ {
+ Assert(equal(phv, nlp->paramval));
+ /* Present, so nothing to do */
+ break;
+ }
+ }
+ if (lc == NULL)
+ {
+ /* No, so add it */
+ nlp = makeNode(NestLoopParam);
+ nlp->paramno = pitem->paramId;
+ nlp->paramval = (Var *) copyObject(phv);
+ root->curOuterParams = lappend(root->curOuterParams, nlp);
+ }
+ }
+ else
+ elog(ERROR, "unexpected type of subquery parameter");
+ }
+}
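+
+/*
+ * Editorial example (not from the original source): for a query such as
+ *
+ *        SELECT * FROM a, LATERAL (SELECT * FROM b WHERE b.x = a.x) ss;
+ *
+ * planning the subquery "ss" converts its reference to a.x into a
+ * PARAM_EXEC Param and records a PlannerParamItem for it.  When the outer
+ * query later attaches the finished subplan, it passes that list here, and
+ * each item becomes (or matches) a NestLoopParam in root->curOuterParams,
+ * telling the parent nestloop to ship the current value of a.x into the
+ * inner side on every rescan.
+ */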
+
+/*
+ * Identify any NestLoopParams that should be supplied by a NestLoop plan
+ * node with the specified lefthand rels. Remove them from the active
+ * root->curOuterParams list and return them as the result list.
+ */
+List *
+identify_current_nestloop_params(PlannerInfo *root, Relids leftrelids)
+{
+ List *result;
+ ListCell *cell;
+
+ result = NIL;
+ foreach(cell, root->curOuterParams)
+ {
+ NestLoopParam *nlp = (NestLoopParam *) lfirst(cell);
+
+ /*
+ * We are looking for Vars and PHVs that can be supplied by the
+ * lefthand rels. The "bms_overlap" test is just an optimization to
+ * allow skipping find_placeholder_info() if the PHV couldn't match.
+ */
+ if (IsA(nlp->paramval, Var) &&
+ bms_is_member(nlp->paramval->varno, leftrelids))
+ {
+ root->curOuterParams = foreach_delete_current(root->curOuterParams,
+ cell);
+ result = lappend(result, nlp);
+ }
+ else if (IsA(nlp->paramval, PlaceHolderVar) &&
+ bms_overlap(((PlaceHolderVar *) nlp->paramval)->phrels,
+ leftrelids) &&
+ bms_is_subset(find_placeholder_info(root,
+ (PlaceHolderVar *) nlp->paramval,
+ false)->ph_eval_at,
+ leftrelids))
+ {
+ root->curOuterParams = foreach_delete_current(root->curOuterParams,
+ cell);
+ result = lappend(result, nlp);
+ }
+ }
+ return result;
+}
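+
+/*
+ * Editorial sketch: a caller building a NestLoop plan node would typically
+ * call this once the outer (lefthand) side is settled, so that exactly the
+ * Params computable from that side are attached to this join and the rest
+ * stay queued for joins further up the tree:
+ *
+ *        nestParams = identify_current_nestloop_params(root, outerrelids);
+ *        ... attach nestParams to the NestLoop plan node ...
+ */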
+
+/*
+ * Generate a new Param node that will not conflict with any other.
+ *
+ * This is used to create Params representing subplan outputs or
+ * NestLoop parameters.
+ *
+ * We don't need to build a PlannerParamItem for such a Param, but we do
+ * need to make sure we record the type in paramExecTypes (otherwise,
+ * there won't be a slot allocated for it).
+ */
+Param *
+generate_new_exec_param(PlannerInfo *root, Oid paramtype, int32 paramtypmod,
+ Oid paramcollation)
+{
+ Param *retval;
+
+ retval = makeNode(Param);
+ retval->paramkind = PARAM_EXEC;
+ retval->paramid = list_length(root->glob->paramExecTypes);
+ root->glob->paramExecTypes = lappend_oid(root->glob->paramExecTypes,
+ paramtype);
+ retval->paramtype = paramtype;
+ retval->paramtypmod = paramtypmod;
+ retval->paramcollid = paramcollation;
+ retval->location = -1;
+
+ return retval;
+}
+
+/*
+ * Assign a (nonnegative) PARAM_EXEC ID for a special parameter (one that
+ * is not actually used to carry a value at runtime). Such parameters are
+ * used for special runtime signaling purposes, such as connecting a
+ * recursive union node to its worktable scan node or forcing plan
+ * re-evaluation within the EvalPlanQual mechanism. No actual Param node
+ * exists with this ID, however.
+ */
+int
+assign_special_exec_param(PlannerInfo *root)
+{
+ int paramId = list_length(root->glob->paramExecTypes);
+
+ root->glob->paramExecTypes = lappend_oid(root->glob->paramExecTypes,
+ InvalidOid);
+ return paramId;
+}
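+
+/*
+ * Editorial example (not part of the original source): the difference
+ * between the two routines above is whether a value is carried at runtime.
+ * A subplan output or nestloop parameter needs a typed slot, e.g.
+ *
+ *        prm = generate_new_exec_param(root, INT4OID, -1, InvalidOid);
+ *
+ * whereas a purely signaling parameter -- say, one linking a
+ * RecursiveUnion to its WorkTableScan -- only needs an ID, and InvalidOid
+ * is recorded in paramExecTypes as its "type":
+ *
+ *        wtParam = assign_special_exec_param(root);
+ */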
diff --git a/src/backend/optimizer/util/pathnode.c b/src/backend/optimizer/util/pathnode.c
new file mode 100644
index 0000000..33affaf
--- /dev/null
+++ b/src/backend/optimizer/util/pathnode.c
@@ -0,0 +1,4298 @@
+/*-------------------------------------------------------------------------
+ *
+ * pathnode.c
+ * Routines to manipulate pathlists and create path nodes
+ *
+ * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ * src/backend/optimizer/util/pathnode.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include <math.h>
+
+#include "foreign/fdwapi.h"
+#include "miscadmin.h"
+#include "nodes/extensible.h"
+#include "nodes/nodeFuncs.h"
+#include "optimizer/appendinfo.h"
+#include "optimizer/clauses.h"
+#include "optimizer/cost.h"
+#include "optimizer/optimizer.h"
+#include "optimizer/pathnode.h"
+#include "optimizer/paths.h"
+#include "optimizer/planmain.h"
+#include "optimizer/prep.h"
+#include "optimizer/restrictinfo.h"
+#include "optimizer/tlist.h"
+#include "parser/parsetree.h"
+#include "utils/lsyscache.h"
+#include "utils/memutils.h"
+#include "utils/selfuncs.h"
+
+typedef enum
+{
+ COSTS_EQUAL, /* path costs are fuzzily equal */
+ COSTS_BETTER1, /* first path is cheaper than second */
+ COSTS_BETTER2, /* second path is cheaper than first */
+ COSTS_DIFFERENT /* neither path dominates the other on cost */
+} PathCostComparison;
+
+/*
+ * STD_FUZZ_FACTOR is the normal fuzz factor for compare_path_costs_fuzzily.
+ * XXX is it worth making this user-controllable? It provides a tradeoff
+ * between planner runtime and the accuracy of path cost comparisons.
+ */
+#define STD_FUZZ_FACTOR 1.01
+
+static List *translate_sub_tlist(List *tlist, int relid);
+static int append_total_cost_compare(const ListCell *a, const ListCell *b);
+static int append_startup_cost_compare(const ListCell *a, const ListCell *b);
+static List *reparameterize_pathlist_by_child(PlannerInfo *root,
+ List *pathlist,
+ RelOptInfo *child_rel);
+
+
+/*****************************************************************************
+ * MISC. PATH UTILITIES
+ *****************************************************************************/
+
+/*
+ * compare_path_costs
+ * Return -1, 0, or +1 according as path1 is cheaper, the same cost,
+ * or more expensive than path2 for the specified criterion.
+ */
+int
+compare_path_costs(Path *path1, Path *path2, CostSelector criterion)
+{
+ if (criterion == STARTUP_COST)
+ {
+ if (path1->startup_cost < path2->startup_cost)
+ return -1;
+ if (path1->startup_cost > path2->startup_cost)
+ return +1;
+
+ /*
+ * If paths have the same startup cost (not at all unlikely), order
+ * them by total cost.
+ */
+ if (path1->total_cost < path2->total_cost)
+ return -1;
+ if (path1->total_cost > path2->total_cost)
+ return +1;
+ }
+ else
+ {
+ if (path1->total_cost < path2->total_cost)
+ return -1;
+ if (path1->total_cost > path2->total_cost)
+ return +1;
+
+ /*
+ * If paths have the same total cost, order them by startup cost.
+ */
+ if (path1->startup_cost < path2->startup_cost)
+ return -1;
+ if (path1->startup_cost > path2->startup_cost)
+ return +1;
+ }
+ return 0;
+}
+
+/*
+ * compare_fractional_path_costs
+ * Return -1, 0, or +1 according as path1 is cheaper, the same cost,
+ * or more expensive than path2 for fetching the specified fraction
+ * of the total tuples.
+ *
+ * If fraction is <= 0 or >= 1, we interpret it as 1, ie, we select the
+ * path with the cheaper total_cost.
+ */
+int
+compare_fractional_path_costs(Path *path1, Path *path2,
+ double fraction)
+{
+ Cost cost1,
+ cost2;
+
+ if (fraction <= 0.0 || fraction >= 1.0)
+ return compare_path_costs(path1, path2, TOTAL_COST);
+ cost1 = path1->startup_cost +
+ fraction * (path1->total_cost - path1->startup_cost);
+ cost2 = path2->startup_cost +
+ fraction * (path2->total_cost - path2->startup_cost);
+ if (cost1 < cost2)
+ return -1;
+ if (cost1 > cost2)
+ return +1;
+ return 0;
+}
+
+/*
+ * compare_path_costs_fuzzily
+ * Compare the costs of two paths to see if either can be said to
+ * dominate the other.
+ *
+ * We use fuzzy comparisons so that add_path() can avoid keeping both of
+ * a pair of paths that really have insignificantly different cost.
+ *
+ * The fuzz_factor argument must be 1.0 plus delta, where delta is the
+ * fraction of the smaller cost that is considered to be a significant
+ * difference. For example, fuzz_factor = 1.01 makes the fuzziness limit
+ * be 1% of the smaller cost.
+ *
+ * The two paths are said to have "equal" costs if both startup and total
+ * costs are fuzzily the same. Path1 is said to be better than path2 if
+ * it has fuzzily better startup cost and fuzzily no worse total cost,
+ * or if it has fuzzily better total cost and fuzzily no worse startup cost.
+ * Path2 is better than path1 if the reverse holds. Finally, if one path
+ * is fuzzily better than the other on startup cost and fuzzily worse on
+ * total cost, we just say that their costs are "different", since neither
+ * dominates the other across the whole performance spectrum.
+ *
+ * This function also enforces a policy rule that paths for which the relevant
+ * one of parent->consider_startup and parent->consider_param_startup is false
+ * cannot survive comparisons solely on the grounds of good startup cost, so
+ * we never return COSTS_DIFFERENT when that is true for the total-cost loser.
+ * (But if total costs are fuzzily equal, we compare startup costs anyway,
+ * in hopes of eliminating one path or the other.)
+ */
+static PathCostComparison
+compare_path_costs_fuzzily(Path *path1, Path *path2, double fuzz_factor)
+{
+#define CONSIDER_PATH_STARTUP_COST(p) \
+ ((p)->param_info == NULL ? (p)->parent->consider_startup : (p)->parent->consider_param_startup)
+
+ /*
+ * Check total cost first since it's more likely to be different; many
+ * paths have zero startup cost.
+ */
+ if (path1->total_cost > path2->total_cost * fuzz_factor)
+ {
+ /* path1 fuzzily worse on total cost */
+ if (CONSIDER_PATH_STARTUP_COST(path1) &&
+ path2->startup_cost > path1->startup_cost * fuzz_factor)
+ {
+ /* ... but path2 fuzzily worse on startup, so DIFFERENT */
+ return COSTS_DIFFERENT;
+ }
+ /* else path2 dominates */
+ return COSTS_BETTER2;
+ }
+ if (path2->total_cost > path1->total_cost * fuzz_factor)
+ {
+ /* path2 fuzzily worse on total cost */
+ if (CONSIDER_PATH_STARTUP_COST(path2) &&
+ path1->startup_cost > path2->startup_cost * fuzz_factor)
+ {
+ /* ... but path1 fuzzily worse on startup, so DIFFERENT */
+ return COSTS_DIFFERENT;
+ }
+ /* else path1 dominates */
+ return COSTS_BETTER1;
+ }
+ /* fuzzily the same on total cost ... */
+ if (path1->startup_cost > path2->startup_cost * fuzz_factor)
+ {
+ /* ... but path1 fuzzily worse on startup, so path2 wins */
+ return COSTS_BETTER2;
+ }
+ if (path2->startup_cost > path1->startup_cost * fuzz_factor)
+ {
+ /* ... but path2 fuzzily worse on startup, so path1 wins */
+ return COSTS_BETTER1;
+ }
+ /* fuzzily the same on both costs */
+ return COSTS_EQUAL;
+
+#undef CONSIDER_PATH_STARTUP_COST
+}
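+
+/*
+ * Editorial worked example: with STD_FUZZ_FACTOR = 1.01, total costs of
+ * 100.0 and 100.5 are "fuzzily the same" (100.5 <= 100.0 * 1.01), so the
+ * decision falls through to the startup-cost comparison; total costs of
+ * 100.0 and 102.0 differ by more than 1%, so the 102.0 path is fuzzily
+ * worse on total cost and the overall verdict then depends only on the
+ * startup-cost policy check described above.
+ */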
+
+/*
+ * set_cheapest
+ * Find the minimum-cost paths from among a relation's paths,
+ * and save them in the rel's cheapest-path fields.
+ *
+ * cheapest_total_path is normally the cheapest-total-cost unparameterized
+ * path; but if there are no unparameterized paths, we assign it to be the
+ * best (cheapest least-parameterized) parameterized path. However, only
+ * unparameterized paths are considered candidates for cheapest_startup_path,
+ * so that will be NULL if there are no unparameterized paths.
+ *
+ * The cheapest_parameterized_paths list collects all parameterized paths
+ * that have survived the add_path() tournament for this relation. (Since
+ * add_path ignores pathkeys for a parameterized path, these will be paths
+ * that have best cost or best row count for their parameterization. We
+ * may also have both a parallel-safe and a non-parallel-safe path for the
+ * same parameterization in some cases, but this should be
+ * relatively rare since, most typically, all paths for the same relation
+ * will be parallel-safe or none of them will.)
+ *
+ * cheapest_parameterized_paths always includes the cheapest-total
+ * unparameterized path, too, if there is one; the users of that list find
+ * it more convenient if that's included.
+ *
+ * This is normally called only after we've finished constructing the path
+ * list for the rel node.
+ */
+void
+set_cheapest(RelOptInfo *parent_rel)
+{
+ Path *cheapest_startup_path;
+ Path *cheapest_total_path;
+ Path *best_param_path;
+ List *parameterized_paths;
+ ListCell *p;
+
+ Assert(IsA(parent_rel, RelOptInfo));
+
+ if (parent_rel->pathlist == NIL)
+ elog(ERROR, "could not devise a query plan for the given query");
+
+ cheapest_startup_path = cheapest_total_path = best_param_path = NULL;
+ parameterized_paths = NIL;
+
+ foreach(p, parent_rel->pathlist)
+ {
+ Path *path = (Path *) lfirst(p);
+ int cmp;
+
+ if (path->param_info)
+ {
+ /* Parameterized path, so add it to parameterized_paths */
+ parameterized_paths = lappend(parameterized_paths, path);
+
+ /*
+ * If we have an unparameterized cheapest-total, we no longer care
+ * about finding the best parameterized path, so move on.
+ */
+ if (cheapest_total_path)
+ continue;
+
+ /*
+ * Otherwise, track the best parameterized path, which is the one
+ * with least total cost among those of the minimum
+ * parameterization.
+ */
+ if (best_param_path == NULL)
+ best_param_path = path;
+ else
+ {
+ switch (bms_subset_compare(PATH_REQ_OUTER(path),
+ PATH_REQ_OUTER(best_param_path)))
+ {
+ case BMS_EQUAL:
+ /* keep the cheaper one */
+ if (compare_path_costs(path, best_param_path,
+ TOTAL_COST) < 0)
+ best_param_path = path;
+ break;
+ case BMS_SUBSET1:
+ /* new path is less-parameterized */
+ best_param_path = path;
+ break;
+ case BMS_SUBSET2:
+ /* old path is less-parameterized, keep it */
+ break;
+ case BMS_DIFFERENT:
+
+ /*
+ * This means that neither path has the least possible
+ * parameterization for the rel. We'll sit on the old
+ * path until something better comes along.
+ */
+ break;
+ }
+ }
+ }
+ else
+ {
+ /* Unparameterized path, so consider it for cheapest slots */
+ if (cheapest_total_path == NULL)
+ {
+ cheapest_startup_path = cheapest_total_path = path;
+ continue;
+ }
+
+ /*
+ * If we find two paths of identical costs, try to keep the
+ * better-sorted one. The paths might have unrelated sort
+ * orderings, in which case we can only guess which might be
+ * better to keep, but if one is superior then we definitely
+ * should keep that one.
+ */
+ cmp = compare_path_costs(cheapest_startup_path, path, STARTUP_COST);
+ if (cmp > 0 ||
+ (cmp == 0 &&
+ compare_pathkeys(cheapest_startup_path->pathkeys,
+ path->pathkeys) == PATHKEYS_BETTER2))
+ cheapest_startup_path = path;
+
+ cmp = compare_path_costs(cheapest_total_path, path, TOTAL_COST);
+ if (cmp > 0 ||
+ (cmp == 0 &&
+ compare_pathkeys(cheapest_total_path->pathkeys,
+ path->pathkeys) == PATHKEYS_BETTER2))
+ cheapest_total_path = path;
+ }
+ }
+
+ /* Add cheapest unparameterized path, if any, to parameterized_paths */
+ if (cheapest_total_path)
+ parameterized_paths = lcons(cheapest_total_path, parameterized_paths);
+
+ /*
+ * If there is no unparameterized path, use the best parameterized path as
+ * cheapest_total_path (but not as cheapest_startup_path).
+ */
+ if (cheapest_total_path == NULL)
+ cheapest_total_path = best_param_path;
+ Assert(cheapest_total_path != NULL);
+
+ parent_rel->cheapest_startup_path = cheapest_startup_path;
+ parent_rel->cheapest_total_path = cheapest_total_path;
+ parent_rel->cheapest_unique_path = NULL; /* computed only if needed */
+ parent_rel->cheapest_parameterized_paths = parameterized_paths;
+}
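+
+/*
+ * Editorial sketch: the usual calling pattern is to generate all candidate
+ * paths for a rel and only then ask for the cheapest ones, e.g.
+ *
+ *        add_path(rel, create_seqscan_path(root, rel, NULL, 0));
+ *        ... add index paths, tidscan paths, etc ...
+ *        set_cheapest(rel);
+ *
+ * after which rel->cheapest_total_path is what most join-planning code
+ * consumes.
+ */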
+
+/*
+ * add_path
+ * Consider a potential implementation path for the specified parent rel,
+ * and add it to the rel's pathlist if it is worthy of consideration.
+ * A path is worthy if it has a better sort order (better pathkeys) or
+ * cheaper cost (on either dimension), or generates fewer rows, than any
+ * existing path that has the same or superset parameterization rels.
+ * We also consider parallel-safe paths more worthy than others.
+ *
+ * We also remove from the rel's pathlist any old paths that are dominated
+ * by new_path --- that is, new_path is cheaper, at least as well ordered,
+ * generates no more rows, requires no outer rels not required by the old
+ * path, and is no less parallel-safe.
+ *
+ * In most cases, a path with a superset parameterization will generate
+ * fewer rows (since it has more join clauses to apply), so that those two
+ * figures of merit move in opposite directions; this means that a path of
+ * one parameterization can seldom dominate a path of another. But such
+ * cases do arise, so we make the full set of checks anyway.
+ *
+ * There are two policy decisions embedded in this function, along with
+ * its sibling add_path_precheck. First, we treat all parameterized paths
+ * as having NIL pathkeys, so that they cannot win comparisons on the
+ * basis of sort order. This is to reduce the number of parameterized
+ * paths that are kept; see discussion in src/backend/optimizer/README.
+ *
+ * Second, we only consider cheap startup cost to be interesting if
+ * parent_rel->consider_startup is true for an unparameterized path, or
+ * parent_rel->consider_param_startup is true for a parameterized one.
+ * Again, this allows discarding useless paths sooner.
+ *
+ * The pathlist is kept sorted by total_cost, with cheaper paths
+ * at the front. Within this routine, that's simply a speed hack:
+ * doing it that way makes it more likely that we will reject an inferior
+ * path after a few comparisons, rather than many comparisons.
+ * However, add_path_precheck relies on this ordering to exit early
+ * when possible.
+ *
+ * NOTE: discarded Path objects are immediately pfree'd to reduce planner
+ * memory consumption. We dare not try to free the substructure of a Path,
+ * since much of it may be shared with other Paths or the query tree itself;
+ * but just recycling discarded Path nodes is a very useful savings in
+ * a large join tree. We can recycle the List nodes of pathlist, too.
+ *
+ * As noted in optimizer/README, deleting a previously-accepted Path is
+ * safe because we know that Paths of this rel cannot yet be referenced
+ * from any other rel, such as a higher-level join. However, in some cases
+ * it is possible that a Path is referenced by another Path for its own
+ * rel; we must not delete such a Path, even if it is dominated by the new
+ * Path. Currently this occurs only for IndexPath objects, which may be
+ * referenced as children of BitmapHeapPaths as well as being paths in
+ * their own right. Hence, we don't pfree IndexPaths when rejecting them.
+ *
+ * 'parent_rel' is the relation entry to which the path corresponds.
+ * 'new_path' is a potential path for parent_rel.
+ *
+ * Returns nothing, but modifies parent_rel->pathlist.
+ */
+void
+add_path(RelOptInfo *parent_rel, Path *new_path)
+{
+ bool accept_new = true; /* unless we find a superior old path */
+ int insert_at = 0; /* where to insert new item */
+ List *new_path_pathkeys;
+ ListCell *p1;
+
+ /*
+ * This is a convenient place to check for query cancel --- no part of the
+ * planner goes very long without calling add_path().
+ */
+ CHECK_FOR_INTERRUPTS();
+
+ /* Pretend parameterized paths have no pathkeys, per comment above */
+ new_path_pathkeys = new_path->param_info ? NIL : new_path->pathkeys;
+
+ /*
+ * Loop to check proposed new path against old paths. Note it is possible
+ * for more than one old path to be tossed out because new_path dominates
+ * it.
+ */
+ foreach(p1, parent_rel->pathlist)
+ {
+ Path *old_path = (Path *) lfirst(p1);
+ bool remove_old = false; /* unless new proves superior */
+ PathCostComparison costcmp;
+ PathKeysComparison keyscmp;
+ BMS_Comparison outercmp;
+
+ /*
+ * Do a fuzzy cost comparison with standard fuzziness limit.
+ */
+ costcmp = compare_path_costs_fuzzily(new_path, old_path,
+ STD_FUZZ_FACTOR);
+
+ /*
+ * If the two paths compare differently for startup and total cost,
+ * then we want to keep both, and we can skip comparing pathkeys and
+ * required_outer rels. If they compare the same, proceed with the
+ * other comparisons. Row count is checked last. (We make the tests
+ * in this order because the cost comparison is most likely to turn
+ * out "different", and the pathkeys comparison next most likely. As
+ * explained above, row count very seldom makes a difference, so even
+ * though it's cheap to compare, there's not much point in checking it
+ * earlier.)
+ */
+ if (costcmp != COSTS_DIFFERENT)
+ {
+ /* Similarly check to see if either dominates on pathkeys */
+ List *old_path_pathkeys;
+
+ old_path_pathkeys = old_path->param_info ? NIL : old_path->pathkeys;
+ keyscmp = compare_pathkeys(new_path_pathkeys,
+ old_path_pathkeys);
+ if (keyscmp != PATHKEYS_DIFFERENT)
+ {
+ switch (costcmp)
+ {
+ case COSTS_EQUAL:
+ outercmp = bms_subset_compare(PATH_REQ_OUTER(new_path),
+ PATH_REQ_OUTER(old_path));
+ if (keyscmp == PATHKEYS_BETTER1)
+ {
+ if ((outercmp == BMS_EQUAL ||
+ outercmp == BMS_SUBSET1) &&
+ new_path->rows <= old_path->rows &&
+ new_path->parallel_safe >= old_path->parallel_safe)
+ remove_old = true; /* new dominates old */
+ }
+ else if (keyscmp == PATHKEYS_BETTER2)
+ {
+ if ((outercmp == BMS_EQUAL ||
+ outercmp == BMS_SUBSET2) &&
+ new_path->rows >= old_path->rows &&
+ new_path->parallel_safe <= old_path->parallel_safe)
+ accept_new = false; /* old dominates new */
+ }
+ else /* keyscmp == PATHKEYS_EQUAL */
+ {
+ if (outercmp == BMS_EQUAL)
+ {
+ /*
+ * Same pathkeys and outer rels, and fuzzily
+ * the same cost, so keep just one; to decide
+ * which, first check parallel-safety, then
+ * rows, then do a fuzzy cost comparison with
+ * very small fuzz limit. (We used to do an
+ * exact cost comparison, but that results in
+ * annoying platform-specific plan variations
+ * due to roundoff in the cost estimates.) If
+ * things are still tied, arbitrarily keep
+ * only the old path. Notice that we will
+ * keep only the old path even if the
+ * less-fuzzy comparison decides the startup
+ * and total costs compare differently.
+ */
+ if (new_path->parallel_safe >
+ old_path->parallel_safe)
+ remove_old = true; /* new dominates old */
+ else if (new_path->parallel_safe <
+ old_path->parallel_safe)
+ accept_new = false; /* old dominates new */
+ else if (new_path->rows < old_path->rows)
+ remove_old = true; /* new dominates old */
+ else if (new_path->rows > old_path->rows)
+ accept_new = false; /* old dominates new */
+ else if (compare_path_costs_fuzzily(new_path,
+ old_path,
+ 1.0000000001) == COSTS_BETTER1)
+ remove_old = true; /* new dominates old */
+ else
+ accept_new = false; /* old equals or
+ * dominates new */
+ }
+ else if (outercmp == BMS_SUBSET1 &&
+ new_path->rows <= old_path->rows &&
+ new_path->parallel_safe >= old_path->parallel_safe)
+ remove_old = true; /* new dominates old */
+ else if (outercmp == BMS_SUBSET2 &&
+ new_path->rows >= old_path->rows &&
+ new_path->parallel_safe <= old_path->parallel_safe)
+ accept_new = false; /* old dominates new */
+ /* else different parameterizations, keep both */
+ }
+ break;
+ case COSTS_BETTER1:
+ if (keyscmp != PATHKEYS_BETTER2)
+ {
+ outercmp = bms_subset_compare(PATH_REQ_OUTER(new_path),
+ PATH_REQ_OUTER(old_path));
+ if ((outercmp == BMS_EQUAL ||
+ outercmp == BMS_SUBSET1) &&
+ new_path->rows <= old_path->rows &&
+ new_path->parallel_safe >= old_path->parallel_safe)
+ remove_old = true; /* new dominates old */
+ }
+ break;
+ case COSTS_BETTER2:
+ if (keyscmp != PATHKEYS_BETTER1)
+ {
+ outercmp = bms_subset_compare(PATH_REQ_OUTER(new_path),
+ PATH_REQ_OUTER(old_path));
+ if ((outercmp == BMS_EQUAL ||
+ outercmp == BMS_SUBSET2) &&
+ new_path->rows >= old_path->rows &&
+ new_path->parallel_safe <= old_path->parallel_safe)
+ accept_new = false; /* old dominates new */
+ }
+ break;
+ case COSTS_DIFFERENT:
+
+ /*
+ * can't get here, but keep this case to keep compiler
+ * quiet
+ */
+ break;
+ }
+ }
+ }
+
+ /*
+ * Remove current element from pathlist if dominated by new.
+ */
+ if (remove_old)
+ {
+ parent_rel->pathlist = foreach_delete_current(parent_rel->pathlist,
+ p1);
+
+ /*
+ * Delete the data pointed-to by the deleted cell, if possible
+ */
+ if (!IsA(old_path, IndexPath))
+ pfree(old_path);
+ }
+ else
+ {
+ /* new belongs after this old path if it has cost >= old's */
+ if (new_path->total_cost >= old_path->total_cost)
+ insert_at = foreach_current_index(p1) + 1;
+ }
+
+ /*
+ * If we found an old path that dominates new_path, we can quit
+ * scanning the pathlist; we will not add new_path, and we assume
+ * new_path cannot dominate any other elements of the pathlist.
+ */
+ if (!accept_new)
+ break;
+ }
+
+ if (accept_new)
+ {
+ /* Accept the new path: insert it at proper place in pathlist */
+ parent_rel->pathlist =
+ list_insert_nth(parent_rel->pathlist, insert_at, new_path);
+ }
+ else
+ {
+ /* Reject and recycle the new path */
+ if (!IsA(new_path, IndexPath))
+ pfree(new_path);
+ }
+}
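+
+/*
+ * Editorial worked example: suppose the pathlist holds an unparameterized
+ * path with (startup, total) costs of (0, 120) and no pathkeys, and a new
+ * unparameterized path arrives with costs (0, 100) and useful pathkeys.
+ * The new path is fuzzily better on total cost, no worse on startup cost,
+ * and at least as good on pathkeys, rows, and parallel safety, so the old
+ * path is removed (and pfree'd, unless it is an IndexPath).  Had one path
+ * been fuzzily better on startup and the other on total cost, both would
+ * normally have been kept, subject to the consider_startup policy noted
+ * above.
+ */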
+
+/*
+ * add_path_precheck
+ * Check whether a proposed new path could possibly get accepted.
+ * We assume we know the path's pathkeys and parameterization accurately,
+ * and have lower bounds for its costs.
+ *
+ * Note that we do not know the path's rowcount, since getting an estimate for
+ * that is too expensive to do before prechecking. We assume here that paths
+ * of a superset parameterization will generate fewer rows; if that holds,
+ * then paths with different parameterizations cannot dominate each other
+ * and so we can simply ignore existing paths of another parameterization.
+ * (In the infrequent cases where that rule of thumb fails, add_path will
+ * get rid of the inferior path.)
+ *
+ * At the time this is called, we haven't actually built a Path structure,
+ * so the required information has to be passed piecemeal.
+ */
+bool
+add_path_precheck(RelOptInfo *parent_rel,
+ Cost startup_cost, Cost total_cost,
+ List *pathkeys, Relids required_outer)
+{
+ List *new_path_pathkeys;
+ bool consider_startup;
+ ListCell *p1;
+
+ /* Pretend parameterized paths have no pathkeys, per add_path policy */
+ new_path_pathkeys = required_outer ? NIL : pathkeys;
+
+ /* Decide whether new path's startup cost is interesting */
+ consider_startup = required_outer ? parent_rel->consider_param_startup : parent_rel->consider_startup;
+
+ foreach(p1, parent_rel->pathlist)
+ {
+ Path *old_path = (Path *) lfirst(p1);
+ PathKeysComparison keyscmp;
+
+ /*
+ * We are looking for an old_path with the same parameterization (and
+ * by assumption the same rowcount) that dominates the new path on
+ * pathkeys as well as both cost metrics. If we find one, we can
+ * reject the new path.
+ *
+ * Cost comparisons here should match compare_path_costs_fuzzily.
+ */
+ if (total_cost > old_path->total_cost * STD_FUZZ_FACTOR)
+ {
+ /* new path can win on startup cost only if consider_startup */
+ if (startup_cost > old_path->startup_cost * STD_FUZZ_FACTOR ||
+ !consider_startup)
+ {
+ /* new path loses on cost, so check pathkeys... */
+ List *old_path_pathkeys;
+
+ old_path_pathkeys = old_path->param_info ? NIL : old_path->pathkeys;
+ keyscmp = compare_pathkeys(new_path_pathkeys,
+ old_path_pathkeys);
+ if (keyscmp == PATHKEYS_EQUAL ||
+ keyscmp == PATHKEYS_BETTER2)
+ {
+ /* new path does not win on pathkeys... */
+ if (bms_equal(required_outer, PATH_REQ_OUTER(old_path)))
+ {
+ /* Found an old path that dominates the new one */
+ return false;
+ }
+ }
+ }
+ }
+ else
+ {
+ /*
+ * Since the pathlist is sorted by total_cost, we can stop looking
+ * once we reach a path with a total_cost larger than the new
+ * path's.
+ */
+ break;
+ }
+ }
+
+ return true;
+}
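+
+/*
+ * Editorial sketch: a typical caller computes only rough lower-bound costs
+ * first and uses this precheck to decide whether building the full path is
+ * worth the trouble, along the lines of
+ *
+ *        if (add_path_precheck(joinrel,
+ *                              workspace.startup_cost, workspace.total_cost,
+ *                              pathkeys, required_outer))
+ *            add_path(joinrel, (Path *) create_nestloop_path(...));
+ *
+ * where "workspace" stands for the result of a cheap initial costing step;
+ * the names here are illustrative only.
+ */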
+
+/*
+ * add_partial_path
+ * Like add_path, our goal here is to consider whether a path is worthy
+ * of being kept around, but the considerations here are a bit different.
+ * A partial path is one which can be executed in any number of workers in
+ * parallel such that each worker will generate a subset of the path's
+ * overall result.
+ *
+ * As in add_path, the partial_pathlist is kept sorted with the cheapest
+ * total path in front. This is depended on by multiple places, which
+ * just take the front entry as the cheapest path without searching.
+ *
+ * We don't generate parameterized partial paths for several reasons. Most
+ * importantly, they're not safe to execute, because there's nothing to
+ * make sure that a parallel scan within the parameterized portion of the
+ * plan is running with the same value in every worker at the same time.
+ * Fortunately, it seems unlikely to be worthwhile anyway, because having
+ * each worker scan the entire outer relation and a subset of the inner
+ * relation will generally be a terrible plan. The inner (parameterized)
+ * side of the plan will be small anyway. There could be rare cases where
+ * this wins big - e.g. if join order constraints put a 1-row relation on
+ * the outer side of the topmost join with a parameterized plan on the inner
+ * side - but we'll have to be content not to handle such cases until
+ * somebody builds an executor infrastructure that can cope with them.
+ *
+ * Because we don't consider parameterized paths here, we also don't
+ * need to consider the row counts as a measure of quality: every path will
+ * produce the same number of rows. Neither do we need to consider startup
+ * costs: parallelism is only used for plans that will be run to completion.
+ * Therefore, this routine is much simpler than add_path: it needs to
+ * consider only pathkeys and total cost.
+ *
+ * As with add_path, we pfree paths that are found to be dominated by
+ * another partial path; this requires that there be no other references to
+ * such paths yet. Hence, GatherPaths must not be created for a rel until
+ * we're done creating all partial paths for it. Unlike add_path, we don't
+ * take an exception for IndexPaths as partial index paths won't be
+ * referenced by partial BitmapHeapPaths.
+ */
+void
+add_partial_path(RelOptInfo *parent_rel, Path *new_path)
+{
+ bool accept_new = true; /* unless we find a superior old path */
+ int insert_at = 0; /* where to insert new item */
+ ListCell *p1;
+
+ /* Check for query cancel. */
+ CHECK_FOR_INTERRUPTS();
+
+ /* Path to be added must be parallel safe. */
+ Assert(new_path->parallel_safe);
+
+ /* Relation should be OK for parallelism, too. */
+ Assert(parent_rel->consider_parallel);
+
+ /*
+ * As in add_path, throw out any paths which are dominated by the new
+ * path, but throw out the new path if some existing path dominates it.
+ */
+ foreach(p1, parent_rel->partial_pathlist)
+ {
+ Path *old_path = (Path *) lfirst(p1);
+ bool remove_old = false; /* unless new proves superior */
+ PathKeysComparison keyscmp;
+
+ /* Compare pathkeys. */
+ keyscmp = compare_pathkeys(new_path->pathkeys, old_path->pathkeys);
+
+ /* Unless pathkeys are incompatible, keep just one of the two paths. */
+ if (keyscmp != PATHKEYS_DIFFERENT)
+ {
+ if (new_path->total_cost > old_path->total_cost * STD_FUZZ_FACTOR)
+ {
+ /* New path costs more; keep it only if pathkeys are better. */
+ if (keyscmp != PATHKEYS_BETTER1)
+ accept_new = false;
+ }
+ else if (old_path->total_cost > new_path->total_cost
+ * STD_FUZZ_FACTOR)
+ {
+ /* Old path costs more; keep it only if pathkeys are better. */
+ if (keyscmp != PATHKEYS_BETTER2)
+ remove_old = true;
+ }
+ else if (keyscmp == PATHKEYS_BETTER1)
+ {
+ /* Costs are about the same, new path has better pathkeys. */
+ remove_old = true;
+ }
+ else if (keyscmp == PATHKEYS_BETTER2)
+ {
+ /* Costs are about the same, old path has better pathkeys. */
+ accept_new = false;
+ }
+ else if (old_path->total_cost > new_path->total_cost * 1.0000000001)
+ {
+ /* Pathkeys are the same, and the old path costs more. */
+ remove_old = true;
+ }
+ else
+ {
+ /*
+ * Pathkeys are the same, and new path isn't materially
+ * cheaper.
+ */
+ accept_new = false;
+ }
+ }
+
+ /*
+ * Remove current element from partial_pathlist if dominated by new.
+ */
+ if (remove_old)
+ {
+ parent_rel->partial_pathlist =
+ foreach_delete_current(parent_rel->partial_pathlist, p1);
+ pfree(old_path);
+ }
+ else
+ {
+ /* new belongs after this old path if it has cost >= old's */
+ if (new_path->total_cost >= old_path->total_cost)
+ insert_at = foreach_current_index(p1) + 1;
+ }
+
+ /*
+ * If we found an old path that dominates new_path, we can quit
+ * scanning the partial_pathlist; we will not add new_path, and we
+ * assume new_path cannot dominate any later path.
+ */
+ if (!accept_new)
+ break;
+ }
+
+ if (accept_new)
+ {
+ /* Accept the new path: insert it at proper place */
+ parent_rel->partial_pathlist =
+ list_insert_nth(parent_rel->partial_pathlist, insert_at, new_path);
+ }
+ else
+ {
+ /* Reject and recycle the new path */
+ pfree(new_path);
+ }
+}
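+
+/*
+ * Editorial sketch: because add_partial_path() may pfree a dominated
+ * partial path, Gather/GatherMerge paths over the surviving partial paths
+ * should only be built once partial-path generation for the rel is
+ * complete, roughly:
+ *
+ *        add_partial_path(rel, (Path *) partial_seqscan);
+ *        ... add any other partial paths ...
+ *        generate_useful_gather_paths(root, rel, false);
+ *
+ * (the final call is one possible follow-up; the actual function used
+ * varies by caller, and "partial_seqscan" is an illustrative name).
+ */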
+
+/*
+ * add_partial_path_precheck
+ * Check whether a proposed new partial path could possibly get accepted.
+ *
+ * Unlike add_path_precheck, we can ignore startup cost and parameterization,
+ * since they don't matter for partial paths (see add_partial_path). But
+ * we do want to make sure we don't add a partial path if there's already
+ * a complete path that dominates it, since in that case the proposed path
+ * is surely a loser.
+ */
+bool
+add_partial_path_precheck(RelOptInfo *parent_rel, Cost total_cost,
+ List *pathkeys)
+{
+ ListCell *p1;
+
+ /*
+ * Our goal here is twofold. First, we want to find out whether this path
+ * is clearly inferior to some existing partial path. If so, we want to
+ * reject it immediately. Second, we want to find out whether this path
+ * is clearly superior to some existing partial path -- at least, modulo
+ * final cost computations. If so, we definitely want to consider it.
+ *
+ * Unlike add_path(), we always compare pathkeys here. This is because we
+ * expect partial_pathlist to be very short, and getting a definitive
+ * answer at this stage avoids the need to call add_path_precheck.
+ */
+ foreach(p1, parent_rel->partial_pathlist)
+ {
+ Path *old_path = (Path *) lfirst(p1);
+ PathKeysComparison keyscmp;
+
+ keyscmp = compare_pathkeys(pathkeys, old_path->pathkeys);
+ if (keyscmp != PATHKEYS_DIFFERENT)
+ {
+ if (total_cost > old_path->total_cost * STD_FUZZ_FACTOR &&
+ keyscmp != PATHKEYS_BETTER1)
+ return false;
+ if (old_path->total_cost > total_cost * STD_FUZZ_FACTOR &&
+ keyscmp != PATHKEYS_BETTER2)
+ return true;
+ }
+ }
+
+ /*
+ * This path is neither clearly inferior to an existing partial path nor
+ * clearly good enough that it might replace one. Compare it to
+ * non-parallel plans. If it loses even before accounting for the cost of
+ * the Gather node, we should definitely reject it.
+ *
+ * Note that we pass the total_cost to add_path_precheck twice. This is
+ * because it's never advantageous to consider the startup cost of a
+ * partial path; the resulting plans, if run in parallel, will be run to
+ * completion.
+ */
+ if (!add_path_precheck(parent_rel, total_cost, total_cost, pathkeys,
+ NULL))
+ return false;
+
+ return true;
+}
+
+
+/*****************************************************************************
+ * PATH NODE CREATION ROUTINES
+ *****************************************************************************/
+
+/*
+ * create_seqscan_path
+ * Creates a path corresponding to a sequential scan, returning the
+ * pathnode.
+ */
+Path *
+create_seqscan_path(PlannerInfo *root, RelOptInfo *rel,
+ Relids required_outer, int parallel_workers)
+{
+ Path *pathnode = makeNode(Path);
+
+ pathnode->pathtype = T_SeqScan;
+ pathnode->parent = rel;
+ pathnode->pathtarget = rel->reltarget;
+ pathnode->param_info = get_baserel_parampathinfo(root, rel,
+ required_outer);
+ pathnode->parallel_aware = (parallel_workers > 0);
+ pathnode->parallel_safe = rel->consider_parallel;
+ pathnode->parallel_workers = parallel_workers;
+ pathnode->pathkeys = NIL; /* seqscan has unordered result */
+
+ cost_seqscan(pathnode, root, rel, pathnode->param_info);
+
+ return pathnode;
+}
+
+/*
+ * create_samplescan_path
+ * Creates a path node for a sampled table scan.
+ */
+Path *
+create_samplescan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
+{
+ Path *pathnode = makeNode(Path);
+
+ pathnode->pathtype = T_SampleScan;
+ pathnode->parent = rel;
+ pathnode->pathtarget = rel->reltarget;
+ pathnode->param_info = get_baserel_parampathinfo(root, rel,
+ required_outer);
+ pathnode->parallel_aware = false;
+ pathnode->parallel_safe = rel->consider_parallel;
+ pathnode->parallel_workers = 0;
+ pathnode->pathkeys = NIL; /* samplescan has unordered result */
+
+ cost_samplescan(pathnode, root, rel, pathnode->param_info);
+
+ return pathnode;
+}
+
+/*
+ * create_index_path
+ * Creates a path node for an index scan.
+ *
+ * 'index' is a usable index.
+ * 'indexclauses' is a list of IndexClause nodes representing clauses
+ * to be enforced as qual conditions in the scan.
+ * 'indexorderbys' is a list of bare expressions (no RestrictInfos)
+ * to be used as index ordering operators in the scan.
+ * 'indexorderbycols' is an integer list of index column numbers (zero based)
+ * the ordering operators can be used with.
+ * 'pathkeys' describes the ordering of the path.
+ * 'indexscandir' is ForwardScanDirection or BackwardScanDirection
+ * for an ordered index, or NoMovementScanDirection for
+ * an unordered index.
+ * 'indexonly' is true if an index-only scan is wanted.
+ * 'required_outer' is the set of outer relids for a parameterized path.
+ * 'loop_count' is the number of repetitions of the indexscan to factor into
+ * estimates of caching behavior.
+ * 'partial_path' is true if constructing a parallel index scan path.
+ *
+ * Returns the new path node.
+ */
+IndexPath *
+create_index_path(PlannerInfo *root,
+ IndexOptInfo *index,
+ List *indexclauses,
+ List *indexorderbys,
+ List *indexorderbycols,
+ List *pathkeys,
+ ScanDirection indexscandir,
+ bool indexonly,
+ Relids required_outer,
+ double loop_count,
+ bool partial_path)
+{
+ IndexPath *pathnode = makeNode(IndexPath);
+ RelOptInfo *rel = index->rel;
+
+ pathnode->path.pathtype = indexonly ? T_IndexOnlyScan : T_IndexScan;
+ pathnode->path.parent = rel;
+ pathnode->path.pathtarget = rel->reltarget;
+ pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
+ required_outer);
+ pathnode->path.parallel_aware = false;
+ pathnode->path.parallel_safe = rel->consider_parallel;
+ pathnode->path.parallel_workers = 0;
+ pathnode->path.pathkeys = pathkeys;
+
+ pathnode->indexinfo = index;
+ pathnode->indexclauses = indexclauses;
+ pathnode->indexorderbys = indexorderbys;
+ pathnode->indexorderbycols = indexorderbycols;
+ pathnode->indexscandir = indexscandir;
+
+ cost_index(pathnode, root, loop_count, partial_path);
+
+ return pathnode;
+}
+
+/*
+ * create_bitmap_heap_path
+ * Creates a path node for a bitmap scan.
+ *
+ * 'bitmapqual' is a tree of IndexPath, BitmapAndPath, and BitmapOrPath nodes.
+ * 'required_outer' is the set of outer relids for a parameterized path.
+ * 'loop_count' is the number of repetitions of the indexscan to factor into
+ * estimates of caching behavior.
+ *
+ * loop_count should match the value used when creating the component
+ * IndexPaths.
+ */
+BitmapHeapPath *
+create_bitmap_heap_path(PlannerInfo *root,
+ RelOptInfo *rel,
+ Path *bitmapqual,
+ Relids required_outer,
+ double loop_count,
+ int parallel_degree)
+{
+ BitmapHeapPath *pathnode = makeNode(BitmapHeapPath);
+
+ pathnode->path.pathtype = T_BitmapHeapScan;
+ pathnode->path.parent = rel;
+ pathnode->path.pathtarget = rel->reltarget;
+ pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
+ required_outer);
+ pathnode->path.parallel_aware = (parallel_degree > 0);
+ pathnode->path.parallel_safe = rel->consider_parallel;
+ pathnode->path.parallel_workers = parallel_degree;
+ pathnode->path.pathkeys = NIL; /* always unordered */
+
+ pathnode->bitmapqual = bitmapqual;
+
+ cost_bitmap_heap_scan(&pathnode->path, root, rel,
+ pathnode->path.param_info,
+ bitmapqual, loop_count);
+
+ return pathnode;
+}
+
+/*
+ * create_bitmap_and_path
+ * Creates a path node representing a BitmapAnd.
+ */
+BitmapAndPath *
+create_bitmap_and_path(PlannerInfo *root,
+ RelOptInfo *rel,
+ List *bitmapquals)
+{
+ BitmapAndPath *pathnode = makeNode(BitmapAndPath);
+ Relids required_outer = NULL;
+ ListCell *lc;
+
+ pathnode->path.pathtype = T_BitmapAnd;
+ pathnode->path.parent = rel;
+ pathnode->path.pathtarget = rel->reltarget;
+
+ /*
+ * Identify the required outer rels as the union of what the child paths
+ * depend on. (Alternatively, we could insist that the caller pass this
+ * in, but it's more convenient and reliable to compute it here.)
+ */
+ foreach(lc, bitmapquals)
+ {
+ Path *bitmapqual = (Path *) lfirst(lc);
+
+ required_outer = bms_add_members(required_outer,
+ PATH_REQ_OUTER(bitmapqual));
+ }
+ pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
+ required_outer);
+
+ /*
+ * Currently, a BitmapHeapPath, BitmapAndPath, or BitmapOrPath will be
+ * parallel-safe if and only if rel->consider_parallel is set. So, we can
+ * set the flag for this path based only on the relation-level flag,
+ * without actually iterating over the list of children.
+ */
+ pathnode->path.parallel_aware = false;
+ pathnode->path.parallel_safe = rel->consider_parallel;
+ pathnode->path.parallel_workers = 0;
+
+ pathnode->path.pathkeys = NIL; /* always unordered */
+
+ pathnode->bitmapquals = bitmapquals;
+
+ /* this sets bitmapselectivity as well as the regular cost fields: */
+ cost_bitmap_and_node(pathnode, root);
+
+ return pathnode;
+}
+
+/*
+ * create_bitmap_or_path
+ * Creates a path node representing a BitmapOr.
+ */
+BitmapOrPath *
+create_bitmap_or_path(PlannerInfo *root,
+ RelOptInfo *rel,
+ List *bitmapquals)
+{
+ BitmapOrPath *pathnode = makeNode(BitmapOrPath);
+ Relids required_outer = NULL;
+ ListCell *lc;
+
+ pathnode->path.pathtype = T_BitmapOr;
+ pathnode->path.parent = rel;
+ pathnode->path.pathtarget = rel->reltarget;
+
+ /*
+ * Identify the required outer rels as the union of what the child paths
+ * depend on. (Alternatively, we could insist that the caller pass this
+ * in, but it's more convenient and reliable to compute it here.)
+ */
+ foreach(lc, bitmapquals)
+ {
+ Path *bitmapqual = (Path *) lfirst(lc);
+
+ required_outer = bms_add_members(required_outer,
+ PATH_REQ_OUTER(bitmapqual));
+ }
+ pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
+ required_outer);
+
+ /*
+ * Currently, a BitmapHeapPath, BitmapAndPath, or BitmapOrPath will be
+ * parallel-safe if and only if rel->consider_parallel is set. So, we can
+ * set the flag for this path based only on the relation-level flag,
+ * without actually iterating over the list of children.
+ */
+ pathnode->path.parallel_aware = false;
+ pathnode->path.parallel_safe = rel->consider_parallel;
+ pathnode->path.parallel_workers = 0;
+
+ pathnode->path.pathkeys = NIL; /* always unordered */
+
+ pathnode->bitmapquals = bitmapquals;
+
+ /* this sets bitmapselectivity as well as the regular cost fields: */
+ cost_bitmap_or_node(pathnode, root);
+
+ return pathnode;
+}
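+
+/*
+ * Editorial sketch: the three bitmap constructors above compose; a scan
+ * combining two indexes might be built roughly as
+ *
+ *        andpath = create_bitmap_and_path(root, rel,
+ *                                         list_make2(ipath1, ipath2));
+ *        bhpath = create_bitmap_heap_path(root, rel, (Path *) andpath,
+ *                                         required_outer, loop_count, 0);
+ *
+ * where ipath1 and ipath2 are IndexPaths built with the same loop_count,
+ * per the note above.
+ */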
+
+/*
+ * create_tidscan_path
+ * Creates a path corresponding to a scan by TID, returning the pathnode.
+ */
+TidPath *
+create_tidscan_path(PlannerInfo *root, RelOptInfo *rel, List *tidquals,
+ Relids required_outer)
+{
+ TidPath *pathnode = makeNode(TidPath);
+
+ pathnode->path.pathtype = T_TidScan;
+ pathnode->path.parent = rel;
+ pathnode->path.pathtarget = rel->reltarget;
+ pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
+ required_outer);
+ pathnode->path.parallel_aware = false;
+ pathnode->path.parallel_safe = rel->consider_parallel;
+ pathnode->path.parallel_workers = 0;
+ pathnode->path.pathkeys = NIL; /* always unordered */
+
+ pathnode->tidquals = tidquals;
+
+ cost_tidscan(&pathnode->path, root, rel, tidquals,
+ pathnode->path.param_info);
+
+ return pathnode;
+}
+
+/*
+ * create_tidrangescan_path
+ * Creates a path corresponding to a scan by a range of TIDs, returning
+ * the pathnode.
+ */
+TidRangePath *
+create_tidrangescan_path(PlannerInfo *root, RelOptInfo *rel,
+ List *tidrangequals, Relids required_outer)
+{
+ TidRangePath *pathnode = makeNode(TidRangePath);
+
+ pathnode->path.pathtype = T_TidRangeScan;
+ pathnode->path.parent = rel;
+ pathnode->path.pathtarget = rel->reltarget;
+ pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
+ required_outer);
+ pathnode->path.parallel_aware = false;
+ pathnode->path.parallel_safe = rel->consider_parallel;
+ pathnode->path.parallel_workers = 0;
+ pathnode->path.pathkeys = NIL; /* always unordered */
+
+ pathnode->tidrangequals = tidrangequals;
+
+ cost_tidrangescan(&pathnode->path, root, rel, tidrangequals,
+ pathnode->path.param_info);
+
+ return pathnode;
+}
+
+/*
+ * create_append_path
+ * Creates a path corresponding to an Append plan, returning the
+ * pathnode.
+ *
+ * Note that we must handle subpaths = NIL, representing a dummy access path.
+ * Also, there are callers that pass root = NULL.
+ */
+AppendPath *
+create_append_path(PlannerInfo *root,
+ RelOptInfo *rel,
+ List *subpaths, List *partial_subpaths,
+ List *pathkeys, Relids required_outer,
+ int parallel_workers, bool parallel_aware,
+ double rows)
+{
+ AppendPath *pathnode = makeNode(AppendPath);
+ ListCell *l;
+
+ Assert(!parallel_aware || parallel_workers > 0);
+
+ pathnode->path.pathtype = T_Append;
+ pathnode->path.parent = rel;
+ pathnode->path.pathtarget = rel->reltarget;
+
+ /*
+ * When generating an Append path for a partitioned table, there may be
+ * parameterized quals that are useful for run-time pruning. Hence,
+ * compute path.param_info the same way as for any other baserel, so that
+ * such quals will be available for make_partition_pruneinfo(). (This
+ * would not work right for a non-baserel, ie a scan on a non-leaf child
+ * partition, and it's not necessary anyway in that case. Must skip it if
+ * we don't have "root", too.)
+ */
+ if (root && rel->reloptkind == RELOPT_BASEREL && IS_PARTITIONED_REL(rel))
+ pathnode->path.param_info = get_baserel_parampathinfo(root,
+ rel,
+ required_outer);
+ else
+ pathnode->path.param_info = get_appendrel_parampathinfo(rel,
+ required_outer);
+
+ pathnode->path.parallel_aware = parallel_aware;
+ pathnode->path.parallel_safe = rel->consider_parallel;
+ pathnode->path.parallel_workers = parallel_workers;
+ pathnode->path.pathkeys = pathkeys;
+
+ /*
+ * For parallel append, non-partial paths are sorted by descending total
+ * costs. That way, the total time to finish all non-partial paths is
+ * minimized. Also, the partial paths are sorted by descending startup
+ * costs.  Some paths may require startup work to be done by a single
+ * worker; in such cases it's better for the workers to pick the expensive
+ * ones first, while the leader takes the plan with the cheapest startup
+ * cost.
+ */
+ if (pathnode->path.parallel_aware)
+ {
+ /*
+ * We mustn't fiddle with the order of subpaths when the Append has
+ * pathkeys. The order they're listed in is critical to keeping the
+ * pathkeys valid.
+ */
+ Assert(pathkeys == NIL);
+
+ list_sort(subpaths, append_total_cost_compare);
+ list_sort(partial_subpaths, append_startup_cost_compare);
+ }
+ pathnode->first_partial_path = list_length(subpaths);
+ pathnode->subpaths = list_concat(subpaths, partial_subpaths);
+
+ /*
+ * Apply query-wide LIMIT if known and path is for sole base relation.
+ * (Handling this at this low level is a bit klugy.)
+ */
+ if (root != NULL && bms_equal(rel->relids, root->all_baserels))
+ pathnode->limit_tuples = root->limit_tuples;
+ else
+ pathnode->limit_tuples = -1.0;
+
+ foreach(l, pathnode->subpaths)
+ {
+ Path *subpath = (Path *) lfirst(l);
+
+ pathnode->path.parallel_safe = pathnode->path.parallel_safe &&
+ subpath->parallel_safe;
+
+ /* All child paths must have same parameterization */
+ Assert(bms_equal(PATH_REQ_OUTER(subpath), required_outer));
+ }
+
+ Assert(!parallel_aware || pathnode->path.parallel_safe);
+
+ /*
+ * If there's exactly one child path, the Append is a no-op and will be
+ * discarded later (in setrefs.c); therefore, we can inherit the child's
+ * size and cost, as well as its pathkeys if any (overriding whatever the
+ * caller might've said). Otherwise, we must do the normal costsize
+ * calculation.
+ */
+ if (list_length(pathnode->subpaths) == 1)
+ {
+ Path *child = (Path *) linitial(pathnode->subpaths);
+
+ pathnode->path.rows = child->rows;
+ pathnode->path.startup_cost = child->startup_cost;
+ pathnode->path.total_cost = child->total_cost;
+ pathnode->path.pathkeys = child->pathkeys;
+ }
+ else
+ cost_append(pathnode);
+
+ /* If the caller provided a row estimate, override the computed value. */
+ if (rows >= 0)
+ pathnode->path.rows = rows;
+
+ return pathnode;
+}
+
+/*
+ * append_total_cost_compare
+ * list_sort comparator for sorting append child paths
+ * by total_cost descending
+ *
+ * For equal total costs, we fall back to comparing startup costs; if those
+ * are equal too, break ties using bms_compare on the paths' relids.
+ * (This is to avoid getting unpredictable results from list_sort.)
+ */
+static int
+append_total_cost_compare(const ListCell *a, const ListCell *b)
+{
+ Path *path1 = (Path *) lfirst(a);
+ Path *path2 = (Path *) lfirst(b);
+ int cmp;
+
+ cmp = compare_path_costs(path1, path2, TOTAL_COST);
+ if (cmp != 0)
+ return -cmp;
+ return bms_compare(path1->parent->relids, path2->parent->relids);
+}
+
+/*
+ * append_startup_cost_compare
+ * list_sort comparator for sorting append child paths
+ * by startup_cost descending
+ *
+ * For equal startup costs, we fall back to comparing total costs; if those
+ * are equal too, break ties using bms_compare on the paths' relids.
+ * (This is to avoid getting unpredictable results from list_sort.)
+ */
+static int
+append_startup_cost_compare(const ListCell *a, const ListCell *b)
+{
+ Path *path1 = (Path *) lfirst(a);
+ Path *path2 = (Path *) lfirst(b);
+ int cmp;
+
+ cmp = compare_path_costs(path1, path2, STARTUP_COST);
+ if (cmp != 0)
+ return -cmp;
+ return bms_compare(path1->parent->relids, path2->parent->relids);
+}
+
+/*
+ * create_merge_append_path
+ * Creates a path corresponding to a MergeAppend plan, returning the
+ * pathnode.
+ */
+MergeAppendPath *
+create_merge_append_path(PlannerInfo *root,
+ RelOptInfo *rel,
+ List *subpaths,
+ List *pathkeys,
+ Relids required_outer)
+{
+ MergeAppendPath *pathnode = makeNode(MergeAppendPath);
+ Cost input_startup_cost;
+ Cost input_total_cost;
+ ListCell *l;
+
+ pathnode->path.pathtype = T_MergeAppend;
+ pathnode->path.parent = rel;
+ pathnode->path.pathtarget = rel->reltarget;
+ pathnode->path.param_info = get_appendrel_parampathinfo(rel,
+ required_outer);
+ pathnode->path.parallel_aware = false;
+ pathnode->path.parallel_safe = rel->consider_parallel;
+ pathnode->path.parallel_workers = 0;
+ pathnode->path.pathkeys = pathkeys;
+ pathnode->subpaths = subpaths;
+
+ /*
+ * Apply query-wide LIMIT if known and path is for sole base relation.
+ * (Handling this at this low level is a bit klugy.)
+ */
+ if (bms_equal(rel->relids, root->all_baserels))
+ pathnode->limit_tuples = root->limit_tuples;
+ else
+ pathnode->limit_tuples = -1.0;
+
+ /*
+ * Add up the sizes and costs of the input paths.
+ */
+ pathnode->path.rows = 0;
+ input_startup_cost = 0;
+ input_total_cost = 0;
+ foreach(l, subpaths)
+ {
+ Path *subpath = (Path *) lfirst(l);
+
+ pathnode->path.rows += subpath->rows;
+ pathnode->path.parallel_safe = pathnode->path.parallel_safe &&
+ subpath->parallel_safe;
+
+ if (pathkeys_contained_in(pathkeys, subpath->pathkeys))
+ {
+ /* Subpath is adequately ordered, we won't need to sort it */
+ input_startup_cost += subpath->startup_cost;
+ input_total_cost += subpath->total_cost;
+ }
+ else
+ {
+ /* We'll need to insert a Sort node, so include cost for that */
+ Path sort_path; /* dummy for result of cost_sort */
+
+ cost_sort(&sort_path,
+ root,
+ pathkeys,
+ subpath->total_cost,
+ subpath->parent->tuples,
+ subpath->pathtarget->width,
+ 0.0,
+ work_mem,
+ pathnode->limit_tuples);
+ input_startup_cost += sort_path.startup_cost;
+ input_total_cost += sort_path.total_cost;
+ }
+
+ /* All child paths must have same parameterization */
+ Assert(bms_equal(PATH_REQ_OUTER(subpath), required_outer));
+ }
+
+ /*
+ * Now we can compute total costs of the MergeAppend. If there's exactly
+ * one child path, the MergeAppend is a no-op and will be discarded later
+ * (in setrefs.c); otherwise we do the normal cost calculation.
+ */
+ if (list_length(subpaths) == 1)
+ {
+ pathnode->path.startup_cost = input_startup_cost;
+ pathnode->path.total_cost = input_total_cost;
+ }
+ else
+ cost_merge_append(&pathnode->path, root,
+ pathkeys, list_length(subpaths),
+ input_startup_cost, input_total_cost,
+ pathnode->path.rows);
+
+ return pathnode;
+}
+
+/*
+ * create_group_result_path
+ * Creates a path representing a Result-and-nothing-else plan.
+ *
+ * This is only used for degenerate grouping cases, in which we know we
+ * need to produce one result row, possibly filtered by a HAVING qual.
+ */
+GroupResultPath *
+create_group_result_path(PlannerInfo *root, RelOptInfo *rel,
+ PathTarget *target, List *havingqual)
+{
+ GroupResultPath *pathnode = makeNode(GroupResultPath);
+
+ pathnode->path.pathtype = T_Result;
+ pathnode->path.parent = rel;
+ pathnode->path.pathtarget = target;
+ pathnode->path.param_info = NULL; /* there are no other rels... */
+ pathnode->path.parallel_aware = false;
+ pathnode->path.parallel_safe = rel->consider_parallel;
+ pathnode->path.parallel_workers = 0;
+ pathnode->path.pathkeys = NIL;
+ pathnode->quals = havingqual;
+
+ /*
+ * We can't quite use cost_resultscan() because the quals we want to
+ * account for are not baserestrict quals of the rel. Might as well just
+ * hack it here.
+ */
+ pathnode->path.rows = 1;
+ pathnode->path.startup_cost = target->cost.startup;
+ pathnode->path.total_cost = target->cost.startup +
+ cpu_tuple_cost + target->cost.per_tuple;
+
+ /*
+ * Add cost of qual, if any --- but we ignore its selectivity, since our
+ * rowcount estimate should be 1 no matter what the qual is.
+ */
+ if (havingqual)
+ {
+ QualCost qual_cost;
+
+ cost_qual_eval(&qual_cost, havingqual, root);
+ /* havingqual is evaluated once at startup */
+ pathnode->path.startup_cost += qual_cost.startup + qual_cost.per_tuple;
+ pathnode->path.total_cost += qual_cost.startup + qual_cost.per_tuple;
+ }
+
+ return pathnode;
+}
+
+/*
+ * create_material_path
+ * Creates a path corresponding to a Material plan, returning the
+ * pathnode.
+ */
+MaterialPath *
+create_material_path(RelOptInfo *rel, Path *subpath)
+{
+ MaterialPath *pathnode = makeNode(MaterialPath);
+
+ Assert(subpath->parent == rel);
+
+ pathnode->path.pathtype = T_Material;
+ pathnode->path.parent = rel;
+ pathnode->path.pathtarget = rel->reltarget;
+ pathnode->path.param_info = subpath->param_info;
+ pathnode->path.parallel_aware = false;
+ pathnode->path.parallel_safe = rel->consider_parallel &&
+ subpath->parallel_safe;
+ pathnode->path.parallel_workers = subpath->parallel_workers;
+ pathnode->path.pathkeys = subpath->pathkeys;
+
+ pathnode->subpath = subpath;
+
+ cost_material(&pathnode->path,
+ subpath->startup_cost,
+ subpath->total_cost,
+ subpath->rows,
+ subpath->pathtarget->width);
+
+ return pathnode;
+}
+
+/*
+ * create_memoize_path
+ * Creates a path corresponding to a Memoize plan, returning the pathnode.
+ */
+MemoizePath *
+create_memoize_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
+ List *param_exprs, List *hash_operators,
+ bool singlerow, bool binary_mode, double calls)
+{
+ MemoizePath *pathnode = makeNode(MemoizePath);
+
+ Assert(subpath->parent == rel);
+
+ pathnode->path.pathtype = T_Memoize;
+ pathnode->path.parent = rel;
+ pathnode->path.pathtarget = rel->reltarget;
+ pathnode->path.param_info = subpath->param_info;
+ pathnode->path.parallel_aware = false;
+ pathnode->path.parallel_safe = rel->consider_parallel &&
+ subpath->parallel_safe;
+ pathnode->path.parallel_workers = subpath->parallel_workers;
+ pathnode->path.pathkeys = subpath->pathkeys;
+
+ pathnode->subpath = subpath;
+ pathnode->hash_operators = hash_operators;
+ pathnode->param_exprs = param_exprs;
+ pathnode->singlerow = singlerow;
+ pathnode->binary_mode = binary_mode;
+ pathnode->calls = calls;
+
+ /*
+ * For now we set est_entries to 0. cost_memoize_rescan() does all the
+ * hard work to determine how many cache entries there are likely to be,
+ * so it seems best to leave it up to that function to fill this field in.
+ * If left at 0, the executor will make a guess at a good value.
+ */
+ pathnode->est_entries = 0;
+
+ /*
+ * Add a small additional charge for caching the first entry. All the
+ * harder calculations for rescans are performed in cost_memoize_rescan().
+ */
+ pathnode->path.startup_cost = subpath->startup_cost + cpu_tuple_cost;
+ pathnode->path.total_cost = subpath->total_cost + cpu_tuple_cost;
+ pathnode->path.rows = subpath->rows;
+
+ return pathnode;
+}
+
+/*
+ * create_unique_path
+ * Creates a path representing elimination of distinct rows from the
+ * input data. Distinct-ness is defined according to the needs of the
+ * semijoin represented by sjinfo. If it is not possible to identify
+ * how to make the data unique, NULL is returned.
+ *
+ * If used at all, this is likely to be called repeatedly on the same rel;
+ * and the input subpath should always be the same (the cheapest_total path
+ * for the rel). So we cache the result.
+ */
+UniquePath *
+create_unique_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
+ SpecialJoinInfo *sjinfo)
+{
+ UniquePath *pathnode;
+ Path sort_path; /* dummy for result of cost_sort */
+ Path agg_path; /* dummy for result of cost_agg */
+ MemoryContext oldcontext;
+ int numCols;
+
+ /* Caller made a mistake if subpath isn't cheapest_total ... */
+ Assert(subpath == rel->cheapest_total_path);
+ Assert(subpath->parent == rel);
+ /* ... or if SpecialJoinInfo is the wrong one */
+ Assert(sjinfo->jointype == JOIN_SEMI);
+ Assert(bms_equal(rel->relids, sjinfo->syn_righthand));
+
+ /* If result already cached, return it */
+ if (rel->cheapest_unique_path)
+ return (UniquePath *) rel->cheapest_unique_path;
+
+ /* If it's not possible to unique-ify, return NULL */
+ if (!(sjinfo->semi_can_btree || sjinfo->semi_can_hash))
+ return NULL;
+
+ /*
+ * When called during GEQO join planning, we are in a short-lived memory
+ * context. We must make sure that the path and any subsidiary data
+ * structures created for a baserel survive the GEQO cycle, else the
+ * baserel is trashed for future GEQO cycles. On the other hand, when we
+ * are creating those for a joinrel during GEQO, we don't want them to
+ * clutter the main planning context. Upshot is that the best solution is
+ * to explicitly allocate memory in the same context the given RelOptInfo
+ * is in.
+ */
+ oldcontext = MemoryContextSwitchTo(GetMemoryChunkContext(rel));
+
+ pathnode = makeNode(UniquePath);
+
+ pathnode->path.pathtype = T_Unique;
+ pathnode->path.parent = rel;
+ pathnode->path.pathtarget = rel->reltarget;
+ pathnode->path.param_info = subpath->param_info;
+ pathnode->path.parallel_aware = false;
+ pathnode->path.parallel_safe = rel->consider_parallel &&
+ subpath->parallel_safe;
+ pathnode->path.parallel_workers = subpath->parallel_workers;
+
+ /*
+ * Assume the output is unsorted, since we don't necessarily have pathkeys
+ * to represent it. (This might get overridden below.)
+ */
+ pathnode->path.pathkeys = NIL;
+
+ pathnode->subpath = subpath;
+ pathnode->in_operators = sjinfo->semi_operators;
+ pathnode->uniq_exprs = sjinfo->semi_rhs_exprs;
+
+ /*
+ * If the input is a relation and it has a unique index that proves the
+ * semi_rhs_exprs are unique, then we don't need to do anything. Note
+ * that relation_has_unique_index_for automatically considers restriction
+ * clauses for the rel, as well.
+ */
+ if (rel->rtekind == RTE_RELATION && sjinfo->semi_can_btree &&
+ relation_has_unique_index_for(root, rel, NIL,
+ sjinfo->semi_rhs_exprs,
+ sjinfo->semi_operators))
+ {
+ pathnode->umethod = UNIQUE_PATH_NOOP;
+ pathnode->path.rows = rel->rows;
+ pathnode->path.startup_cost = subpath->startup_cost;
+ pathnode->path.total_cost = subpath->total_cost;
+ pathnode->path.pathkeys = subpath->pathkeys;
+
+ rel->cheapest_unique_path = (Path *) pathnode;
+
+ MemoryContextSwitchTo(oldcontext);
+
+ return pathnode;
+ }
+
+ /*
+ * If the input is a subquery whose output must be unique already, then we
+ * don't need to do anything. The test for uniqueness has to consider
+ * exactly which columns we are extracting; for example "SELECT DISTINCT
+ * x,y" doesn't guarantee that x alone is distinct. So we cannot check for
+ * this optimization unless semi_rhs_exprs consists only of simple Vars
+ * referencing subquery outputs. (Possibly we could do something with
+ * expressions in the subquery outputs, too, but for now keep it simple.)
+ */
+ if (rel->rtekind == RTE_SUBQUERY)
+ {
+ RangeTblEntry *rte = planner_rt_fetch(rel->relid, root);
+
+ if (query_supports_distinctness(rte->subquery))
+ {
+ List *sub_tlist_colnos;
+
+ sub_tlist_colnos = translate_sub_tlist(sjinfo->semi_rhs_exprs,
+ rel->relid);
+
+ if (sub_tlist_colnos &&
+ query_is_distinct_for(rte->subquery,
+ sub_tlist_colnos,
+ sjinfo->semi_operators))
+ {
+ pathnode->umethod = UNIQUE_PATH_NOOP;
+ pathnode->path.rows = rel->rows;
+ pathnode->path.startup_cost = subpath->startup_cost;
+ pathnode->path.total_cost = subpath->total_cost;
+ pathnode->path.pathkeys = subpath->pathkeys;
+
+ rel->cheapest_unique_path = (Path *) pathnode;
+
+ MemoryContextSwitchTo(oldcontext);
+
+ return pathnode;
+ }
+ }
+ }
+
+ /* Estimate number of output rows */
+ pathnode->path.rows = estimate_num_groups(root,
+ sjinfo->semi_rhs_exprs,
+ rel->rows,
+ NULL,
+ NULL);
+ numCols = list_length(sjinfo->semi_rhs_exprs);
+
+ if (sjinfo->semi_can_btree)
+ {
+ /*
+ * Estimate cost for sort+unique implementation
+ */
+ cost_sort(&sort_path, root, NIL,
+ subpath->total_cost,
+ rel->rows,
+ subpath->pathtarget->width,
+ 0.0,
+ work_mem,
+ -1.0);
+
+ /*
+ * Charge one cpu_operator_cost per comparison per input tuple. We
+ * assume all columns get compared for most of the tuples. (XXX
+ * probably this is an overestimate.) This should agree with
+ * create_upper_unique_path.
+ */
+ sort_path.total_cost += cpu_operator_cost * rel->rows * numCols;
+ }
+
+ if (sjinfo->semi_can_hash)
+ {
+ /*
+ * Estimate the overhead per hashtable entry at 64 bytes (same as in
+ * planner.c).
+ */
+ int hashentrysize = subpath->pathtarget->width + 64;
+
+ if (hashentrysize * pathnode->path.rows > get_hash_memory_limit())
+ {
+ /*
+ * We should not try to hash. Hack the SpecialJoinInfo to
+ * remember this, in case we come through here again.
+ */
+ sjinfo->semi_can_hash = false;
+ }
+ else
+ cost_agg(&agg_path, root,
+ AGG_HASHED, NULL,
+ numCols, pathnode->path.rows,
+ NIL,
+ subpath->startup_cost,
+ subpath->total_cost,
+ rel->rows,
+ subpath->pathtarget->width);
+ }
+
+ if (sjinfo->semi_can_btree && sjinfo->semi_can_hash)
+ {
+ if (agg_path.total_cost < sort_path.total_cost)
+ pathnode->umethod = UNIQUE_PATH_HASH;
+ else
+ pathnode->umethod = UNIQUE_PATH_SORT;
+ }
+ else if (sjinfo->semi_can_btree)
+ pathnode->umethod = UNIQUE_PATH_SORT;
+ else if (sjinfo->semi_can_hash)
+ pathnode->umethod = UNIQUE_PATH_HASH;
+ else
+ {
+ /* we can get here only if we abandoned hashing above */
+ MemoryContextSwitchTo(oldcontext);
+ return NULL;
+ }
+
+ if (pathnode->umethod == UNIQUE_PATH_HASH)
+ {
+ pathnode->path.startup_cost = agg_path.startup_cost;
+ pathnode->path.total_cost = agg_path.total_cost;
+ }
+ else
+ {
+ pathnode->path.startup_cost = sort_path.startup_cost;
+ pathnode->path.total_cost = sort_path.total_cost;
+ }
+
+ rel->cheapest_unique_path = (Path *) pathnode;
+
+ MemoryContextSwitchTo(oldcontext);
+
+ return pathnode;
+}
+
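The hash-versus-sort choice above hinges on get_hash_memory_limit(), which scales work_mem by hash_mem_multiplier. Below is a minimal standalone sketch of that memory test; the function name and all numbers are assumptions for illustration, not anything taken from this file.

#include <stdbool.h>

/*
 * Hypothetical restatement of the "can we hash?" test above; the 64-byte
 * per-entry overhead mirrors the guess used in create_unique_path.
 */
static bool
hash_uniquify_would_fit(double output_rows, int tuple_width,
						double hash_mem_limit_bytes)
{
	double		hashentrysize = tuple_width + 64;

	return hashentrysize * output_rows <= hash_mem_limit_bytes;
}

For example, hash_uniquify_would_fit(200000, 40, 8 * 1024 * 1024) is false: 104 bytes times 200,000 entries is roughly 20 MB, which exceeds an 8 MB limit, so only the sort-based method would remain in play.
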
+/*
+ * create_gather_merge_path
+ *
+ * Creates a path corresponding to a gather merge scan, returning
+ * the pathnode.
+ */
+GatherMergePath *
+create_gather_merge_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
+ PathTarget *target, List *pathkeys,
+ Relids required_outer, double *rows)
+{
+ GatherMergePath *pathnode = makeNode(GatherMergePath);
+ Cost input_startup_cost = 0;
+ Cost input_total_cost = 0;
+
+ Assert(subpath->parallel_safe);
+ Assert(pathkeys);
+
+ pathnode->path.pathtype = T_GatherMerge;
+ pathnode->path.parent = rel;
+ pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
+ required_outer);
+ pathnode->path.parallel_aware = false;
+
+ pathnode->subpath = subpath;
+ pathnode->num_workers = subpath->parallel_workers;
+ pathnode->path.pathkeys = pathkeys;
+ pathnode->path.pathtarget = target ? target : rel->reltarget;
+ pathnode->path.rows += subpath->rows;
+
+ if (pathkeys_contained_in(pathkeys, subpath->pathkeys))
+ {
+ /* Subpath is adequately ordered, we won't need to sort it */
+ input_startup_cost += subpath->startup_cost;
+ input_total_cost += subpath->total_cost;
+ }
+ else
+ {
+ /* We'll need to insert a Sort node, so include cost for that */
+ Path sort_path; /* dummy for result of cost_sort */
+
+ cost_sort(&sort_path,
+ root,
+ pathkeys,
+ subpath->total_cost,
+ subpath->rows,
+ subpath->pathtarget->width,
+ 0.0,
+ work_mem,
+ -1);
+ input_startup_cost += sort_path.startup_cost;
+ input_total_cost += sort_path.total_cost;
+ }
+
+ cost_gather_merge(pathnode, root, rel, pathnode->path.param_info,
+ input_startup_cost, input_total_cost, rows);
+
+ return pathnode;
+}
+
+/*
+ * translate_sub_tlist - get subquery column numbers represented by tlist
+ *
+ * The given targetlist usually contains only Vars referencing the given relid.
+ * Extract their varattnos (ie, the column numbers of the subquery) and return
+ * as an integer List.
+ *
+ * If any of the tlist items is not a simple Var, we cannot determine whether
+ * the subquery's uniqueness condition (if any) matches ours, so punt and
+ * return NIL.
+ */
+static List *
+translate_sub_tlist(List *tlist, int relid)
+{
+ List *result = NIL;
+ ListCell *l;
+
+ foreach(l, tlist)
+ {
+ Var *var = (Var *) lfirst(l);
+
+ if (!var || !IsA(var, Var) ||
+ var->varno != relid)
+ return NIL; /* punt */
+
+ result = lappend_int(result, var->varattno);
+ }
+ return result;
+}
+
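For example (hypothetical attribute numbers): with relid = 3 and a tlist consisting of the Vars "3.2" and "3.5", the result is the integer list (2, 5), which the caller hands to query_is_distinct_for; if the tlist also contained an expression such as "x + 1", the function would punt and return NIL, and create_unique_path would fall back to an explicit sort- or hash-based unique step.
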
+/*
+ * create_gather_path
+ * Creates a path corresponding to a gather scan, returning the
+ * pathnode.
+ *
+ * 'rows' may optionally be set to override row estimates from other sources.
+ */
+GatherPath *
+create_gather_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
+ PathTarget *target, Relids required_outer, double *rows)
+{
+ GatherPath *pathnode = makeNode(GatherPath);
+
+ Assert(subpath->parallel_safe);
+
+ pathnode->path.pathtype = T_Gather;
+ pathnode->path.parent = rel;
+ pathnode->path.pathtarget = target;
+ pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
+ required_outer);
+ pathnode->path.parallel_aware = false;
+ pathnode->path.parallel_safe = false;
+ pathnode->path.parallel_workers = 0;
+ pathnode->path.pathkeys = NIL; /* Gather has unordered result */
+
+ pathnode->subpath = subpath;
+ pathnode->num_workers = subpath->parallel_workers;
+ pathnode->single_copy = false;
+
+ if (pathnode->num_workers == 0)
+ {
+ pathnode->path.pathkeys = subpath->pathkeys;
+ pathnode->num_workers = 1;
+ pathnode->single_copy = true;
+ }
+
+ cost_gather(pathnode, root, rel, pathnode->path.param_info, rows);
+
+ return pathnode;
+}
+
+/*
+ * create_subqueryscan_path
+ * Creates a path corresponding to a scan of a subquery,
+ * returning the pathnode.
+ */
+SubqueryScanPath *
+create_subqueryscan_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
+ List *pathkeys, Relids required_outer)
+{
+ SubqueryScanPath *pathnode = makeNode(SubqueryScanPath);
+
+ pathnode->path.pathtype = T_SubqueryScan;
+ pathnode->path.parent = rel;
+ pathnode->path.pathtarget = rel->reltarget;
+ pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
+ required_outer);
+ pathnode->path.parallel_aware = false;
+ pathnode->path.parallel_safe = rel->consider_parallel &&
+ subpath->parallel_safe;
+ pathnode->path.parallel_workers = subpath->parallel_workers;
+ pathnode->path.pathkeys = pathkeys;
+ pathnode->subpath = subpath;
+
+ cost_subqueryscan(pathnode, root, rel, pathnode->path.param_info);
+
+ return pathnode;
+}
+
+/*
+ * create_functionscan_path
+ * Creates a path corresponding to a sequential scan of a function,
+ * returning the pathnode.
+ */
+Path *
+create_functionscan_path(PlannerInfo *root, RelOptInfo *rel,
+ List *pathkeys, Relids required_outer)
+{
+ Path *pathnode = makeNode(Path);
+
+ pathnode->pathtype = T_FunctionScan;
+ pathnode->parent = rel;
+ pathnode->pathtarget = rel->reltarget;
+ pathnode->param_info = get_baserel_parampathinfo(root, rel,
+ required_outer);
+ pathnode->parallel_aware = false;
+ pathnode->parallel_safe = rel->consider_parallel;
+ pathnode->parallel_workers = 0;
+ pathnode->pathkeys = pathkeys;
+
+ cost_functionscan(pathnode, root, rel, pathnode->param_info);
+
+ return pathnode;
+}
+
+/*
+ * create_tablefuncscan_path
+ * Creates a path corresponding to a sequential scan of a table function,
+ * returning the pathnode.
+ */
+Path *
+create_tablefuncscan_path(PlannerInfo *root, RelOptInfo *rel,
+ Relids required_outer)
+{
+ Path *pathnode = makeNode(Path);
+
+ pathnode->pathtype = T_TableFuncScan;
+ pathnode->parent = rel;
+ pathnode->pathtarget = rel->reltarget;
+ pathnode->param_info = get_baserel_parampathinfo(root, rel,
+ required_outer);
+ pathnode->parallel_aware = false;
+ pathnode->parallel_safe = rel->consider_parallel;
+ pathnode->parallel_workers = 0;
+ pathnode->pathkeys = NIL; /* result is always unordered */
+
+ cost_tablefuncscan(pathnode, root, rel, pathnode->param_info);
+
+ return pathnode;
+}
+
+/*
+ * create_valuesscan_path
+ * Creates a path corresponding to a scan of a VALUES list,
+ * returning the pathnode.
+ */
+Path *
+create_valuesscan_path(PlannerInfo *root, RelOptInfo *rel,
+ Relids required_outer)
+{
+ Path *pathnode = makeNode(Path);
+
+ pathnode->pathtype = T_ValuesScan;
+ pathnode->parent = rel;
+ pathnode->pathtarget = rel->reltarget;
+ pathnode->param_info = get_baserel_parampathinfo(root, rel,
+ required_outer);
+ pathnode->parallel_aware = false;
+ pathnode->parallel_safe = rel->consider_parallel;
+ pathnode->parallel_workers = 0;
+ pathnode->pathkeys = NIL; /* result is always unordered */
+
+ cost_valuesscan(pathnode, root, rel, pathnode->param_info);
+
+ return pathnode;
+}
+
+/*
+ * create_ctescan_path
+ * Creates a path corresponding to a scan of a non-self-reference CTE,
+ * returning the pathnode.
+ */
+Path *
+create_ctescan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
+{
+ Path *pathnode = makeNode(Path);
+
+ pathnode->pathtype = T_CteScan;
+ pathnode->parent = rel;
+ pathnode->pathtarget = rel->reltarget;
+ pathnode->param_info = get_baserel_parampathinfo(root, rel,
+ required_outer);
+ pathnode->parallel_aware = false;
+ pathnode->parallel_safe = rel->consider_parallel;
+ pathnode->parallel_workers = 0;
+ pathnode->pathkeys = NIL; /* XXX for now, result is always unordered */
+
+ cost_ctescan(pathnode, root, rel, pathnode->param_info);
+
+ return pathnode;
+}
+
+/*
+ * create_namedtuplestorescan_path
+ * Creates a path corresponding to a scan of a named tuplestore, returning
+ * the pathnode.
+ */
+Path *
+create_namedtuplestorescan_path(PlannerInfo *root, RelOptInfo *rel,
+ Relids required_outer)
+{
+ Path *pathnode = makeNode(Path);
+
+ pathnode->pathtype = T_NamedTuplestoreScan;
+ pathnode->parent = rel;
+ pathnode->pathtarget = rel->reltarget;
+ pathnode->param_info = get_baserel_parampathinfo(root, rel,
+ required_outer);
+ pathnode->parallel_aware = false;
+ pathnode->parallel_safe = rel->consider_parallel;
+ pathnode->parallel_workers = 0;
+ pathnode->pathkeys = NIL; /* result is always unordered */
+
+ cost_namedtuplestorescan(pathnode, root, rel, pathnode->param_info);
+
+ return pathnode;
+}
+
+/*
+ * create_resultscan_path
+ * Creates a path corresponding to a scan of an RTE_RESULT relation,
+ * returning the pathnode.
+ */
+Path *
+create_resultscan_path(PlannerInfo *root, RelOptInfo *rel,
+ Relids required_outer)
+{
+ Path *pathnode = makeNode(Path);
+
+ pathnode->pathtype = T_Result;
+ pathnode->parent = rel;
+ pathnode->pathtarget = rel->reltarget;
+ pathnode->param_info = get_baserel_parampathinfo(root, rel,
+ required_outer);
+ pathnode->parallel_aware = false;
+ pathnode->parallel_safe = rel->consider_parallel;
+ pathnode->parallel_workers = 0;
+ pathnode->pathkeys = NIL; /* result is always unordered */
+
+ cost_resultscan(pathnode, root, rel, pathnode->param_info);
+
+ return pathnode;
+}
+
+/*
+ * create_worktablescan_path
+ * Creates a path corresponding to a scan of a self-reference CTE,
+ * returning the pathnode.
+ */
+Path *
+create_worktablescan_path(PlannerInfo *root, RelOptInfo *rel,
+ Relids required_outer)
+{
+ Path *pathnode = makeNode(Path);
+
+ pathnode->pathtype = T_WorkTableScan;
+ pathnode->parent = rel;
+ pathnode->pathtarget = rel->reltarget;
+ pathnode->param_info = get_baserel_parampathinfo(root, rel,
+ required_outer);
+ pathnode->parallel_aware = false;
+ pathnode->parallel_safe = rel->consider_parallel;
+ pathnode->parallel_workers = 0;
+ pathnode->pathkeys = NIL; /* result is always unordered */
+
+ /* Cost is the same as for a regular CTE scan */
+ cost_ctescan(pathnode, root, rel, pathnode->param_info);
+
+ return pathnode;
+}
+
+/*
+ * create_foreignscan_path
+ * Creates a path corresponding to a scan of a foreign base table,
+ * returning the pathnode.
+ *
+ * This function is never called from core Postgres; rather, it's expected
+ * to be called by the GetForeignPaths function of a foreign data wrapper.
+ * We make the FDW supply all fields of the path, since we do not have any way
+ * to calculate them in core. However, there is a usually-sane default for
+ * the pathtarget (rel->reltarget), so we let a NULL for "target" select that.
+ */
+ForeignPath *
+create_foreignscan_path(PlannerInfo *root, RelOptInfo *rel,
+ PathTarget *target,
+ double rows, Cost startup_cost, Cost total_cost,
+ List *pathkeys,
+ Relids required_outer,
+ Path *fdw_outerpath,
+ List *fdw_private)
+{
+ ForeignPath *pathnode = makeNode(ForeignPath);
+
+ /* Historically some FDWs were confused about when to use this */
+ Assert(IS_SIMPLE_REL(rel));
+
+ pathnode->path.pathtype = T_ForeignScan;
+ pathnode->path.parent = rel;
+ pathnode->path.pathtarget = target ? target : rel->reltarget;
+ pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
+ required_outer);
+ pathnode->path.parallel_aware = false;
+ pathnode->path.parallel_safe = rel->consider_parallel;
+ pathnode->path.parallel_workers = 0;
+ pathnode->path.rows = rows;
+ pathnode->path.startup_cost = startup_cost;
+ pathnode->path.total_cost = total_cost;
+ pathnode->path.pathkeys = pathkeys;
+
+ pathnode->fdw_outerpath = fdw_outerpath;
+ pathnode->fdw_private = fdw_private;
+
+ return pathnode;
+}
+
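To make the calling convention concrete, here is a minimal, hypothetical GetForeignPaths callback for an imaginary FDW; the cost figures and the per-row factor are invented for illustration, and a real wrapper would derive its row and cost estimates from remote statistics before handing them over.

#include "postgres.h"

#include "optimizer/pathnode.h"

/*
 * Hypothetical GetForeignPaths callback for an imaginary FDW.  The cost
 * numbers and the 0.01-per-row factor are assumptions for illustration only.
 */
static void
demoGetForeignPaths(PlannerInfo *root, RelOptInfo *baserel, Oid foreigntableid)
{
	Cost		startup_cost = 10.0;	/* assumed connection/setup overhead */
	Cost		total_cost = startup_cost + baserel->rows * 0.01;

	/* one unparameterized, unordered path; NULL target means rel->reltarget */
	add_path(baserel, (Path *)
			 create_foreignscan_path(root, baserel,
									 NULL,			/* default pathtarget */
									 baserel->rows, /* core's row estimate */
									 startup_cost,
									 total_cost,
									 NIL,			/* no pathkeys */
									 NULL,			/* not parameterized */
									 NULL,			/* no fdw_outerpath */
									 NIL));			/* no fdw_private */
}
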
+/*
+ * create_foreign_join_path
+ * Creates a path corresponding to a scan of a foreign join,
+ * returning the pathnode.
+ *
+ * This function is never called from core Postgres; rather, it's expected
+ * to be called by the GetForeignJoinPaths function of a foreign data wrapper.
+ * We make the FDW supply all fields of the path, since we do not have any way
+ * to calculate them in core. However, there is a usually-sane default for
+ * the pathtarget (rel->reltarget), so we let a NULL for "target" select that.
+ */
+ForeignPath *
+create_foreign_join_path(PlannerInfo *root, RelOptInfo *rel,
+ PathTarget *target,
+ double rows, Cost startup_cost, Cost total_cost,
+ List *pathkeys,
+ Relids required_outer,
+ Path *fdw_outerpath,
+ List *fdw_private)
+{
+ ForeignPath *pathnode = makeNode(ForeignPath);
+
+ /*
+ * We should use get_joinrel_parampathinfo to handle parameterized paths,
+ * but the API of this function doesn't support it, and existing
+ * extensions aren't yet trying to build such paths anyway. For the
+ * moment just throw an error if someone tries it; eventually we should
+ * revisit this.
+ */
+ if (!bms_is_empty(required_outer) || !bms_is_empty(rel->lateral_relids))
+ elog(ERROR, "parameterized foreign joins are not supported yet");
+
+ pathnode->path.pathtype = T_ForeignScan;
+ pathnode->path.parent = rel;
+ pathnode->path.pathtarget = target ? target : rel->reltarget;
+ pathnode->path.param_info = NULL; /* XXX see above */
+ pathnode->path.parallel_aware = false;
+ pathnode->path.parallel_safe = rel->consider_parallel;
+ pathnode->path.parallel_workers = 0;
+ pathnode->path.rows = rows;
+ pathnode->path.startup_cost = startup_cost;
+ pathnode->path.total_cost = total_cost;
+ pathnode->path.pathkeys = pathkeys;
+
+ pathnode->fdw_outerpath = fdw_outerpath;
+ pathnode->fdw_private = fdw_private;
+
+ return pathnode;
+}
+
+/*
+ * create_foreign_upper_path
+ * Creates a path corresponding to an upper relation that's computed
+ * directly by an FDW, returning the pathnode.
+ *
+ * This function is never called from core Postgres; rather, it's expected to
+ * be called by the GetForeignUpperPaths function of a foreign data wrapper.
+ * We make the FDW supply all fields of the path, since we do not have any way
+ * to calculate them in core. However, there is a usually-sane default for
+ * the pathtarget (rel->reltarget), so we let a NULL for "target" select that.
+ */
+ForeignPath *
+create_foreign_upper_path(PlannerInfo *root, RelOptInfo *rel,
+ PathTarget *target,
+ double rows, Cost startup_cost, Cost total_cost,
+ List *pathkeys,
+ Path *fdw_outerpath,
+ List *fdw_private)
+{
+ ForeignPath *pathnode = makeNode(ForeignPath);
+
+ /*
+ * Upper relations should never have any lateral references, since joining
+ * is complete.
+ */
+ Assert(bms_is_empty(rel->lateral_relids));
+
+ pathnode->path.pathtype = T_ForeignScan;
+ pathnode->path.parent = rel;
+ pathnode->path.pathtarget = target ? target : rel->reltarget;
+ pathnode->path.param_info = NULL;
+ pathnode->path.parallel_aware = false;
+ pathnode->path.parallel_safe = rel->consider_parallel;
+ pathnode->path.parallel_workers = 0;
+ pathnode->path.rows = rows;
+ pathnode->path.startup_cost = startup_cost;
+ pathnode->path.total_cost = total_cost;
+ pathnode->path.pathkeys = pathkeys;
+
+ pathnode->fdw_outerpath = fdw_outerpath;
+ pathnode->fdw_private = fdw_private;
+
+ return pathnode;
+}
+
+/*
+ * calc_nestloop_required_outer
+ * Compute the required_outer set for a nestloop join path
+ *
+ * Note: result must not share storage with either input
+ */
+Relids
+calc_nestloop_required_outer(Relids outerrelids,
+ Relids outer_paramrels,
+ Relids innerrelids,
+ Relids inner_paramrels)
+{
+ Relids required_outer;
+
+ /* inner_path can require rels from outer path, but not vice versa */
+ Assert(!bms_overlap(outer_paramrels, innerrelids));
+ /* easy case if inner path is not parameterized */
+ if (!inner_paramrels)
+ return bms_copy(outer_paramrels);
+ /* else, form the union ... */
+ required_outer = bms_union(outer_paramrels, inner_paramrels);
+ /* ... and remove any mention of now-satisfied outer rels */
+ required_outer = bms_del_members(required_outer,
+ outerrelids);
+ /* maintain invariant that required_outer is exactly NULL if empty */
+ if (bms_is_empty(required_outer))
+ {
+ bms_free(required_outer);
+ required_outer = NULL;
+ }
+ return required_outer;
+}
+
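A worked example with hypothetical relids: if the outer path scans rel {1} and is itself parameterized by {4}, while the inner path scans rel {2} and is parameterized by {1, 4}, the union of the two parameterizations is {1, 4}; deleting the outer rels {1} leaves {4}. The join as a whole therefore still requires rel 4 from some higher outer source, while the inner path's dependence on rel 1 is satisfied within this nestloop.
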
+/*
+ * calc_non_nestloop_required_outer
+ * Compute the required_outer set for a merge or hash join path
+ *
+ * Note: result must not share storage with either input
+ */
+Relids
+calc_non_nestloop_required_outer(Path *outer_path, Path *inner_path)
+{
+ Relids outer_paramrels = PATH_REQ_OUTER(outer_path);
+ Relids inner_paramrels = PATH_REQ_OUTER(inner_path);
+ Relids required_outer;
+
+ /* neither path can require rels from the other */
+ Assert(!bms_overlap(outer_paramrels, inner_path->parent->relids));
+ Assert(!bms_overlap(inner_paramrels, outer_path->parent->relids));
+ /* form the union ... */
+ required_outer = bms_union(outer_paramrels, inner_paramrels);
+ /* we do not need an explicit test for empty; bms_union gets it right */
+ return required_outer;
+}
+
+/*
+ * create_nestloop_path
+ * Creates a pathnode corresponding to a nestloop join between two
+ * relations.
+ *
+ * 'joinrel' is the join relation.
+ * 'jointype' is the type of join required
+ * 'workspace' is the result from initial_cost_nestloop
+ * 'extra' contains various information about the join
+ * 'outer_path' is the outer path
+ * 'inner_path' is the inner path
+ * 'restrict_clauses' are the RestrictInfo nodes to apply at the join
+ * 'pathkeys' are the path keys of the new join path
+ * 'required_outer' is the set of required outer rels
+ *
+ * Returns the resulting path node.
+ */
+NestPath *
+create_nestloop_path(PlannerInfo *root,
+ RelOptInfo *joinrel,
+ JoinType jointype,
+ JoinCostWorkspace *workspace,
+ JoinPathExtraData *extra,
+ Path *outer_path,
+ Path *inner_path,
+ List *restrict_clauses,
+ List *pathkeys,
+ Relids required_outer)
+{
+ NestPath *pathnode = makeNode(NestPath);
+ Relids inner_req_outer = PATH_REQ_OUTER(inner_path);
+
+ /*
+ * If the inner path is parameterized by the outer, we must drop any
+ * restrict_clauses that are due to be moved into the inner path. We have
+ * to do this now, rather than postpone the work till createplan time,
+ * because the restrict_clauses list can affect the size and cost
+ * estimates for this path.
+ */
+ if (bms_overlap(inner_req_outer, outer_path->parent->relids))
+ {
+ Relids inner_and_outer = bms_union(inner_path->parent->relids,
+ inner_req_outer);
+ List *jclauses = NIL;
+ ListCell *lc;
+
+ foreach(lc, restrict_clauses)
+ {
+ RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
+
+ if (!join_clause_is_movable_into(rinfo,
+ inner_path->parent->relids,
+ inner_and_outer))
+ jclauses = lappend(jclauses, rinfo);
+ }
+ restrict_clauses = jclauses;
+ }
+
+ pathnode->jpath.path.pathtype = T_NestLoop;
+ pathnode->jpath.path.parent = joinrel;
+ pathnode->jpath.path.pathtarget = joinrel->reltarget;
+ pathnode->jpath.path.param_info =
+ get_joinrel_parampathinfo(root,
+ joinrel,
+ outer_path,
+ inner_path,
+ extra->sjinfo,
+ required_outer,
+ &restrict_clauses);
+ pathnode->jpath.path.parallel_aware = false;
+ pathnode->jpath.path.parallel_safe = joinrel->consider_parallel &&
+ outer_path->parallel_safe && inner_path->parallel_safe;
+ /* This is a foolish way to estimate parallel_workers, but for now... */
+ pathnode->jpath.path.parallel_workers = outer_path->parallel_workers;
+ pathnode->jpath.path.pathkeys = pathkeys;
+ pathnode->jpath.jointype = jointype;
+ pathnode->jpath.inner_unique = extra->inner_unique;
+ pathnode->jpath.outerjoinpath = outer_path;
+ pathnode->jpath.innerjoinpath = inner_path;
+ pathnode->jpath.joinrestrictinfo = restrict_clauses;
+
+ final_cost_nestloop(root, pathnode, workspace, extra);
+
+ return pathnode;
+}
+
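For instance (hypothetical tables): in t1 JOIN t2 ON t1.x = t2.y where the chosen inner path is an index scan on t2.y parameterized by t1, that equality clause is movable into the inner path, so it is filtered out of joinrestrictinfo here; keeping it would let final_cost_nestloop count its selectivity and evaluation cost a second time.
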
+/*
+ * create_mergejoin_path
+ * Creates a pathnode corresponding to a mergejoin join between
+ * two relations
+ *
+ * 'joinrel' is the join relation
+ * 'jointype' is the type of join required
+ * 'workspace' is the result from initial_cost_mergejoin
+ * 'extra' contains various information about the join
+ * 'outer_path' is the outer path
+ * 'inner_path' is the inner path
+ * 'restrict_clauses' are the RestrictInfo nodes to apply at the join
+ * 'pathkeys' are the path keys of the new join path
+ * 'required_outer' is the set of required outer rels
+ * 'mergeclauses' are the RestrictInfo nodes to use as merge clauses
+ * (this should be a subset of the restrict_clauses list)
+ * 'outersortkeys' are the sort varkeys for the outer relation
+ * 'innersortkeys' are the sort varkeys for the inner relation
+ */
+MergePath *
+create_mergejoin_path(PlannerInfo *root,
+ RelOptInfo *joinrel,
+ JoinType jointype,
+ JoinCostWorkspace *workspace,
+ JoinPathExtraData *extra,
+ Path *outer_path,
+ Path *inner_path,
+ List *restrict_clauses,
+ List *pathkeys,
+ Relids required_outer,
+ List *mergeclauses,
+ List *outersortkeys,
+ List *innersortkeys)
+{
+ MergePath *pathnode = makeNode(MergePath);
+
+ pathnode->jpath.path.pathtype = T_MergeJoin;
+ pathnode->jpath.path.parent = joinrel;
+ pathnode->jpath.path.pathtarget = joinrel->reltarget;
+ pathnode->jpath.path.param_info =
+ get_joinrel_parampathinfo(root,
+ joinrel,
+ outer_path,
+ inner_path,
+ extra->sjinfo,
+ required_outer,
+ &restrict_clauses);
+ pathnode->jpath.path.parallel_aware = false;
+ pathnode->jpath.path.parallel_safe = joinrel->consider_parallel &&
+ outer_path->parallel_safe && inner_path->parallel_safe;
+ /* This is a foolish way to estimate parallel_workers, but for now... */
+ pathnode->jpath.path.parallel_workers = outer_path->parallel_workers;
+ pathnode->jpath.path.pathkeys = pathkeys;
+ pathnode->jpath.jointype = jointype;
+ pathnode->jpath.inner_unique = extra->inner_unique;
+ pathnode->jpath.outerjoinpath = outer_path;
+ pathnode->jpath.innerjoinpath = inner_path;
+ pathnode->jpath.joinrestrictinfo = restrict_clauses;
+ pathnode->path_mergeclauses = mergeclauses;
+ pathnode->outersortkeys = outersortkeys;
+ pathnode->innersortkeys = innersortkeys;
+ /* pathnode->skip_mark_restore will be set by final_cost_mergejoin */
+ /* pathnode->materialize_inner will be set by final_cost_mergejoin */
+
+ final_cost_mergejoin(root, pathnode, workspace, extra);
+
+ return pathnode;
+}
+
+/*
+ * create_hashjoin_path
+ * Creates a pathnode corresponding to a hash join between two relations.
+ *
+ * 'joinrel' is the join relation
+ * 'jointype' is the type of join required
+ * 'workspace' is the result from initial_cost_hashjoin
+ * 'extra' contains various information about the join
+ * 'outer_path' is the cheapest outer path
+ * 'inner_path' is the cheapest inner path
+ * 'parallel_hash' to select Parallel Hash of inner path (shared hash table)
+ * 'restrict_clauses' are the RestrictInfo nodes to apply at the join
+ * 'required_outer' is the set of required outer rels
+ * 'hashclauses' are the RestrictInfo nodes to use as hash clauses
+ * (this should be a subset of the restrict_clauses list)
+ */
+HashPath *
+create_hashjoin_path(PlannerInfo *root,
+ RelOptInfo *joinrel,
+ JoinType jointype,
+ JoinCostWorkspace *workspace,
+ JoinPathExtraData *extra,
+ Path *outer_path,
+ Path *inner_path,
+ bool parallel_hash,
+ List *restrict_clauses,
+ Relids required_outer,
+ List *hashclauses)
+{
+ HashPath *pathnode = makeNode(HashPath);
+
+ pathnode->jpath.path.pathtype = T_HashJoin;
+ pathnode->jpath.path.parent = joinrel;
+ pathnode->jpath.path.pathtarget = joinrel->reltarget;
+ pathnode->jpath.path.param_info =
+ get_joinrel_parampathinfo(root,
+ joinrel,
+ outer_path,
+ inner_path,
+ extra->sjinfo,
+ required_outer,
+ &restrict_clauses);
+ pathnode->jpath.path.parallel_aware =
+ joinrel->consider_parallel && parallel_hash;
+ pathnode->jpath.path.parallel_safe = joinrel->consider_parallel &&
+ outer_path->parallel_safe && inner_path->parallel_safe;
+ /* This is a foolish way to estimate parallel_workers, but for now... */
+ pathnode->jpath.path.parallel_workers = outer_path->parallel_workers;
+
+ /*
+ * A hashjoin never has pathkeys, since its output ordering is
+ * unpredictable due to possible batching. XXX If the inner relation is
+ * small enough, we could instruct the executor that it must not batch,
+ * and then we could assume that the output inherits the outer relation's
+ * ordering, which might save a sort step. However there is considerable
+ * downside if our estimate of the inner relation size is badly off. For
+ * the moment we don't risk it. (Note also that if we wanted to take this
+ * seriously, joinpath.c would have to consider many more paths for the
+ * outer rel than it does now.)
+ */
+ pathnode->jpath.path.pathkeys = NIL;
+ pathnode->jpath.jointype = jointype;
+ pathnode->jpath.inner_unique = extra->inner_unique;
+ pathnode->jpath.outerjoinpath = outer_path;
+ pathnode->jpath.innerjoinpath = inner_path;
+ pathnode->jpath.joinrestrictinfo = restrict_clauses;
+ pathnode->path_hashclauses = hashclauses;
+ /* final_cost_hashjoin will fill in pathnode->num_batches */
+
+ final_cost_hashjoin(root, pathnode, workspace, extra);
+
+ return pathnode;
+}
+
+/*
+ * create_projection_path
+ * Creates a pathnode that represents performing a projection.
+ *
+ * 'rel' is the parent relation associated with the result
+ * 'subpath' is the path representing the source of data
+ * 'target' is the PathTarget to be computed
+ */
+ProjectionPath *
+create_projection_path(PlannerInfo *root,
+ RelOptInfo *rel,
+ Path *subpath,
+ PathTarget *target)
+{
+ ProjectionPath *pathnode = makeNode(ProjectionPath);
+ PathTarget *oldtarget;
+
+ /*
+ * We mustn't put a ProjectionPath directly above another; it's useless
+ * and will confuse create_projection_plan. Rather than making sure all
+ * callers handle that, let's implement it here, by stripping off any
+ * ProjectionPath in what we're given. Given this rule, there won't be
+ * more than one.
+ */
+ if (IsA(subpath, ProjectionPath))
+ {
+ ProjectionPath *subpp = (ProjectionPath *) subpath;
+
+ Assert(subpp->path.parent == rel);
+ subpath = subpp->subpath;
+ Assert(!IsA(subpath, ProjectionPath));
+ }
+
+ pathnode->path.pathtype = T_Result;
+ pathnode->path.parent = rel;
+ pathnode->path.pathtarget = target;
+ /* For now, assume we are above any joins, so no parameterization */
+ pathnode->path.param_info = NULL;
+ pathnode->path.parallel_aware = false;
+ pathnode->path.parallel_safe = rel->consider_parallel &&
+ subpath->parallel_safe &&
+ is_parallel_safe(root, (Node *) target->exprs);
+ pathnode->path.parallel_workers = subpath->parallel_workers;
+ /* Projection does not change the sort order */
+ pathnode->path.pathkeys = subpath->pathkeys;
+
+ pathnode->subpath = subpath;
+
+ /*
+ * We might not need a separate Result node. If the input plan node type
+ * can project, we can just tell it to project something else. Or, if it
+ * can't project but the desired target has the same expression list as
+ * what the input will produce anyway, we can still give it the desired
+ * tlist (possibly changing its ressortgroupref labels, but nothing else).
+ * Note: in the latter case, create_projection_plan has to recheck our
+ * conclusion; see comments therein.
+ */
+ oldtarget = subpath->pathtarget;
+ if (is_projection_capable_path(subpath) ||
+ equal(oldtarget->exprs, target->exprs))
+ {
+ /* No separate Result node needed */
+ pathnode->dummypp = true;
+
+ /*
+ * Set cost of plan as subpath's cost, adjusted for tlist replacement.
+ */
+ pathnode->path.rows = subpath->rows;
+ pathnode->path.startup_cost = subpath->startup_cost +
+ (target->cost.startup - oldtarget->cost.startup);
+ pathnode->path.total_cost = subpath->total_cost +
+ (target->cost.startup - oldtarget->cost.startup) +
+ (target->cost.per_tuple - oldtarget->cost.per_tuple) * subpath->rows;
+ }
+ else
+ {
+ /* We really do need the Result node */
+ pathnode->dummypp = false;
+
+ /*
+ * The Result node's cost is cpu_tuple_cost per row, plus the cost of
+ * evaluating the tlist. There is no qual to worry about.
+ */
+ pathnode->path.rows = subpath->rows;
+ pathnode->path.startup_cost = subpath->startup_cost +
+ target->cost.startup;
+ pathnode->path.total_cost = subpath->total_cost +
+ target->cost.startup +
+ (cpu_tuple_cost + target->cost.per_tuple) * subpath->rows;
+ }
+
+ return pathnode;
+}
+
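A hedged numeric illustration of the two branches (all figures hypothetical, both targets assumed to have zero startup cost, default cpu_tuple_cost = 0.01): for a subpath with rows = 1000 and total_cost = 100.0, an old per-tuple target cost of 0.0025 and a new per-tuple cost of 0.0125, the dummypp branch yields total_cost = 100.0 + (0.0125 - 0.0025) * 1000 = 110.0, with no per-row cpu_tuple_cost because no Result node will be emitted; the non-dummy branch with the same numbers yields 100.0 + (0.01 + 0.0125) * 1000 = 122.5.
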
+/*
+ * apply_projection_to_path
+ * Add a projection step, or just apply the target directly to given path.
+ *
+ * This has the same net effect as create_projection_path(), except that if
+ * a separate Result plan node isn't needed, we just replace the given path's
+ * pathtarget with the desired one. This must be used only when the caller
+ * knows that the given path isn't referenced elsewhere and so can be modified
+ * in-place.
+ *
+ * If the input path is a GatherPath or GatherMergePath, we try to push the
+ * new target down to its input as well; this is a yet more invasive
+ * modification of the input path, which create_projection_path() can't do.
+ *
+ * Note that we mustn't change the source path's parent link; so when it is
+ * add_path'd to "rel" things will be a bit inconsistent. So far that has
+ * not caused any trouble.
+ *
+ * 'rel' is the parent relation associated with the result
+ * 'path' is the path representing the source of data
+ * 'target' is the PathTarget to be computed
+ */
+Path *
+apply_projection_to_path(PlannerInfo *root,
+ RelOptInfo *rel,
+ Path *path,
+ PathTarget *target)
+{
+ QualCost oldcost;
+
+ /*
+ * If given path can't project, we might need a Result node, so make a
+ * separate ProjectionPath.
+ */
+ if (!is_projection_capable_path(path))
+ return (Path *) create_projection_path(root, rel, path, target);
+
+ /*
+ * We can just jam the desired tlist into the existing path, being sure to
+ * update its cost estimates appropriately.
+ */
+ oldcost = path->pathtarget->cost;
+ path->pathtarget = target;
+
+ path->startup_cost += target->cost.startup - oldcost.startup;
+ path->total_cost += target->cost.startup - oldcost.startup +
+ (target->cost.per_tuple - oldcost.per_tuple) * path->rows;
+
+ /*
+ * If the path happens to be a Gather or GatherMerge path, we'd like to
+ * arrange for the subpath to return the required target list so that
+ * workers can help project. But if there is something that is not
+ * parallel-safe in the target expressions, then we can't.
+ */
+ if ((IsA(path, GatherPath) || IsA(path, GatherMergePath)) &&
+ is_parallel_safe(root, (Node *) target->exprs))
+ {
+ /*
+ * We always use create_projection_path here, even if the subpath is
+ * projection-capable, so as to avoid modifying the subpath in place.
+ * It seems unlikely at present that there could be any other
+ * references to the subpath, but better safe than sorry.
+ *
+ * Note that we don't change the parallel path's cost estimates; it
+ * might be appropriate to do so, to reflect the fact that the bulk of
+ * the target evaluation will happen in workers.
+ */
+ if (IsA(path, GatherPath))
+ {
+ GatherPath *gpath = (GatherPath *) path;
+
+ gpath->subpath = (Path *)
+ create_projection_path(root,
+ gpath->subpath->parent,
+ gpath->subpath,
+ target);
+ }
+ else
+ {
+ GatherMergePath *gmpath = (GatherMergePath *) path;
+
+ gmpath->subpath = (Path *)
+ create_projection_path(root,
+ gmpath->subpath->parent,
+ gmpath->subpath,
+ target);
+ }
+ }
+ else if (path->parallel_safe &&
+ !is_parallel_safe(root, (Node *) target->exprs))
+ {
+ /*
+ * We're inserting a parallel-restricted target list into a path
+ * currently marked parallel-safe, so we have to mark it as no longer
+ * safe.
+ */
+ path->parallel_safe = false;
+ }
+
+ return path;
+}
+
+/*
+ * create_set_projection_path
+ * Creates a pathnode that represents performing a projection that
+ * includes set-returning functions.
+ *
+ * 'rel' is the parent relation associated with the result
+ * 'subpath' is the path representing the source of data
+ * 'target' is the PathTarget to be computed
+ */
+ProjectSetPath *
+create_set_projection_path(PlannerInfo *root,
+ RelOptInfo *rel,
+ Path *subpath,
+ PathTarget *target)
+{
+ ProjectSetPath *pathnode = makeNode(ProjectSetPath);
+ double tlist_rows;
+ ListCell *lc;
+
+ pathnode->path.pathtype = T_ProjectSet;
+ pathnode->path.parent = rel;
+ pathnode->path.pathtarget = target;
+ /* For now, assume we are above any joins, so no parameterization */
+ pathnode->path.param_info = NULL;
+ pathnode->path.parallel_aware = false;
+ pathnode->path.parallel_safe = rel->consider_parallel &&
+ subpath->parallel_safe &&
+ is_parallel_safe(root, (Node *) target->exprs);
+ pathnode->path.parallel_workers = subpath->parallel_workers;
+ /* Projection does not change the sort order XXX? */
+ pathnode->path.pathkeys = subpath->pathkeys;
+
+ pathnode->subpath = subpath;
+
+ /*
+ * Estimate number of rows produced by SRFs for each row of input; if
+ * there's more than one in this node, use the maximum.
+ */
+ tlist_rows = 1;
+ foreach(lc, target->exprs)
+ {
+ Node *node = (Node *) lfirst(lc);
+ double itemrows;
+
+ itemrows = expression_returns_set_rows(root, node);
+ if (tlist_rows < itemrows)
+ tlist_rows = itemrows;
+ }
+
+ /*
+ * In addition to the cost of evaluating the tlist, charge cpu_tuple_cost
+ * per input row, and half of cpu_tuple_cost for each added output row.
+ * This is slightly bizarre maybe, but it's what 9.6 did; we may revisit
+ * this estimate later.
+ */
+ pathnode->path.rows = subpath->rows * tlist_rows;
+ pathnode->path.startup_cost = subpath->startup_cost +
+ target->cost.startup;
+ pathnode->path.total_cost = subpath->total_cost +
+ target->cost.startup +
+ (cpu_tuple_cost + target->cost.per_tuple) * subpath->rows +
+ (pathnode->path.rows - subpath->rows) * cpu_tuple_cost / 2;
+
+ return pathnode;
+}
+
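A worked example with assumed numbers (default cpu_tuple_cost = 0.01, tlist evaluation cost ignored): if the subpath returns 100 rows and the target contains two set-returning functions estimated at 4 and 10 rows per input row, tlist_rows = 10 and path.rows = 1000; the added charge is 100 * 0.01 = 1.0 for the input rows plus (1000 - 100) * 0.01 / 2 = 4.5 for the extra output rows, or 5.5 on top of the subpath's total cost.
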
+/*
+ * create_incremental_sort_path
+ * Creates a pathnode that represents performing an incremental sort.
+ *
+ * 'rel' is the parent relation associated with the result
+ * 'subpath' is the path representing the source of data
+ * 'pathkeys' represents the desired sort order
+ * 'presorted_keys' is the number of keys by which the input path is
+ * already sorted
+ * 'limit_tuples' is the estimated bound on the number of output tuples,
+ * or -1 if no LIMIT or couldn't estimate
+ */
+IncrementalSortPath *
+create_incremental_sort_path(PlannerInfo *root,
+ RelOptInfo *rel,
+ Path *subpath,
+ List *pathkeys,
+ int presorted_keys,
+ double limit_tuples)
+{
+ IncrementalSortPath *sort = makeNode(IncrementalSortPath);
+ SortPath *pathnode = &sort->spath;
+
+ pathnode->path.pathtype = T_IncrementalSort;
+ pathnode->path.parent = rel;
+ /* Sort doesn't project, so use source path's pathtarget */
+ pathnode->path.pathtarget = subpath->pathtarget;
+ /* For now, assume we are above any joins, so no parameterization */
+ pathnode->path.param_info = NULL;
+ pathnode->path.parallel_aware = false;
+ pathnode->path.parallel_safe = rel->consider_parallel &&
+ subpath->parallel_safe;
+ pathnode->path.parallel_workers = subpath->parallel_workers;
+ pathnode->path.pathkeys = pathkeys;
+
+ pathnode->subpath = subpath;
+
+ cost_incremental_sort(&pathnode->path,
+ root, pathkeys, presorted_keys,
+ subpath->startup_cost,
+ subpath->total_cost,
+ subpath->rows,
+ subpath->pathtarget->width,
+ 0.0, /* XXX comparison_cost shouldn't be 0? */
+ work_mem, limit_tuples);
+
+ sort->nPresortedCols = presorted_keys;
+
+ return sort;
+}
+
+/*
+ * create_sort_path
+ * Creates a pathnode that represents performing an explicit sort.
+ *
+ * 'rel' is the parent relation associated with the result
+ * 'subpath' is the path representing the source of data
+ * 'pathkeys' represents the desired sort order
+ * 'limit_tuples' is the estimated bound on the number of output tuples,
+ * or -1 if no LIMIT or couldn't estimate
+ */
+SortPath *
+create_sort_path(PlannerInfo *root,
+ RelOptInfo *rel,
+ Path *subpath,
+ List *pathkeys,
+ double limit_tuples)
+{
+ SortPath *pathnode = makeNode(SortPath);
+
+ pathnode->path.pathtype = T_Sort;
+ pathnode->path.parent = rel;
+ /* Sort doesn't project, so use source path's pathtarget */
+ pathnode->path.pathtarget = subpath->pathtarget;
+ /* For now, assume we are above any joins, so no parameterization */
+ pathnode->path.param_info = NULL;
+ pathnode->path.parallel_aware = false;
+ pathnode->path.parallel_safe = rel->consider_parallel &&
+ subpath->parallel_safe;
+ pathnode->path.parallel_workers = subpath->parallel_workers;
+ pathnode->path.pathkeys = pathkeys;
+
+ pathnode->subpath = subpath;
+
+ cost_sort(&pathnode->path, root, pathkeys,
+ subpath->total_cost,
+ subpath->rows,
+ subpath->pathtarget->width,
+ 0.0, /* XXX comparison_cost shouldn't be 0? */
+ work_mem, limit_tuples);
+
+ return pathnode;
+}
+
+/*
+ * create_group_path
+ * Creates a pathnode that represents performing grouping of presorted input
+ *
+ * 'rel' is the parent relation associated with the result
+ * 'subpath' is the path representing the source of data
+ * 'target' is the PathTarget to be computed
+ * 'groupClause' is a list of SortGroupClause's representing the grouping
+ * 'qual' is the HAVING quals if any
+ * 'numGroups' is the estimated number of groups
+ */
+GroupPath *
+create_group_path(PlannerInfo *root,
+ RelOptInfo *rel,
+ Path *subpath,
+ List *groupClause,
+ List *qual,
+ double numGroups)
+{
+ GroupPath *pathnode = makeNode(GroupPath);
+ PathTarget *target = rel->reltarget;
+
+ pathnode->path.pathtype = T_Group;
+ pathnode->path.parent = rel;
+ pathnode->path.pathtarget = target;
+ /* For now, assume we are above any joins, so no parameterization */
+ pathnode->path.param_info = NULL;
+ pathnode->path.parallel_aware = false;
+ pathnode->path.parallel_safe = rel->consider_parallel &&
+ subpath->parallel_safe;
+ pathnode->path.parallel_workers = subpath->parallel_workers;
+ /* Group doesn't change sort ordering */
+ pathnode->path.pathkeys = subpath->pathkeys;
+
+ pathnode->subpath = subpath;
+
+ pathnode->groupClause = groupClause;
+ pathnode->qual = qual;
+
+ cost_group(&pathnode->path, root,
+ list_length(groupClause),
+ numGroups,
+ qual,
+ subpath->startup_cost, subpath->total_cost,
+ subpath->rows);
+
+ /* add tlist eval cost for each output row */
+ pathnode->path.startup_cost += target->cost.startup;
+ pathnode->path.total_cost += target->cost.startup +
+ target->cost.per_tuple * pathnode->path.rows;
+
+ return pathnode;
+}
+
+/*
+ * create_upper_unique_path
+ * Creates a pathnode that represents performing an explicit Unique step
+ * on presorted input.
+ *
+ * This produces a Unique plan node, but the use-case is so different from
+ * create_unique_path that it doesn't seem worth trying to merge the two.
+ *
+ * 'rel' is the parent relation associated with the result
+ * 'subpath' is the path representing the source of data
+ * 'numCols' is the number of grouping columns
+ * 'numGroups' is the estimated number of groups
+ *
+ * The input path must be sorted on the grouping columns, plus possibly
+ * additional columns; so the first numCols pathkeys are the grouping columns
+ */
+UpperUniquePath *
+create_upper_unique_path(PlannerInfo *root,
+ RelOptInfo *rel,
+ Path *subpath,
+ int numCols,
+ double numGroups)
+{
+ UpperUniquePath *pathnode = makeNode(UpperUniquePath);
+
+ pathnode->path.pathtype = T_Unique;
+ pathnode->path.parent = rel;
+ /* Unique doesn't project, so use source path's pathtarget */
+ pathnode->path.pathtarget = subpath->pathtarget;
+ /* For now, assume we are above any joins, so no parameterization */
+ pathnode->path.param_info = NULL;
+ pathnode->path.parallel_aware = false;
+ pathnode->path.parallel_safe = rel->consider_parallel &&
+ subpath->parallel_safe;
+ pathnode->path.parallel_workers = subpath->parallel_workers;
+ /* Unique doesn't change the input ordering */
+ pathnode->path.pathkeys = subpath->pathkeys;
+
+ pathnode->subpath = subpath;
+ pathnode->numkeys = numCols;
+
+ /*
+ * Charge one cpu_operator_cost per comparison per input tuple. We assume
+ * all columns get compared for most of the tuples. (XXX probably this is
+ * an overestimate.)
+ */
+ pathnode->path.startup_cost = subpath->startup_cost;
+ pathnode->path.total_cost = subpath->total_cost +
+ cpu_operator_cost * subpath->rows * numCols;
+ pathnode->path.rows = numGroups;
+
+ return pathnode;
+}
+
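Concretely (hypothetical figures, default cpu_operator_cost = 0.0025): uniquifying a presorted input of 10,000 rows on numCols = 2 columns with an estimated 500 groups keeps the subpath's startup cost, adds 0.0025 * 10000 * 2 = 50.0 to the total cost, and reports rows = 500.
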
+/*
+ * create_agg_path
+ * Creates a pathnode that represents performing aggregation/grouping
+ *
+ * 'rel' is the parent relation associated with the result
+ * 'subpath' is the path representing the source of data
+ * 'target' is the PathTarget to be computed
+ * 'aggstrategy' is the Agg node's basic implementation strategy
+ * 'aggsplit' is the Agg node's aggregate-splitting mode
+ * 'groupClause' is a list of SortGroupClause's representing the grouping
+ * 'qual' is the HAVING quals if any
+ * 'aggcosts' contains cost info about the aggregate functions to be computed
+ * 'numGroups' is the estimated number of groups (1 if not grouping)
+ */
+AggPath *
+create_agg_path(PlannerInfo *root,
+ RelOptInfo *rel,
+ Path *subpath,
+ PathTarget *target,
+ AggStrategy aggstrategy,
+ AggSplit aggsplit,
+ List *groupClause,
+ List *qual,
+ const AggClauseCosts *aggcosts,
+ double numGroups)
+{
+ AggPath *pathnode = makeNode(AggPath);
+
+ pathnode->path.pathtype = T_Agg;
+ pathnode->path.parent = rel;
+ pathnode->path.pathtarget = target;
+ /* For now, assume we are above any joins, so no parameterization */
+ pathnode->path.param_info = NULL;
+ pathnode->path.parallel_aware = false;
+ pathnode->path.parallel_safe = rel->consider_parallel &&
+ subpath->parallel_safe;
+ pathnode->path.parallel_workers = subpath->parallel_workers;
+ if (aggstrategy == AGG_SORTED)
+ pathnode->path.pathkeys = subpath->pathkeys; /* preserves order */
+ else
+ pathnode->path.pathkeys = NIL; /* output is unordered */
+ pathnode->subpath = subpath;
+
+ pathnode->aggstrategy = aggstrategy;
+ pathnode->aggsplit = aggsplit;
+ pathnode->numGroups = numGroups;
+ pathnode->transitionSpace = aggcosts ? aggcosts->transitionSpace : 0;
+ pathnode->groupClause = groupClause;
+ pathnode->qual = qual;
+
+ cost_agg(&pathnode->path, root,
+ aggstrategy, aggcosts,
+ list_length(groupClause), numGroups,
+ qual,
+ subpath->startup_cost, subpath->total_cost,
+ subpath->rows, subpath->pathtarget->width);
+
+ /* add tlist eval cost for each output row */
+ pathnode->path.startup_cost += target->cost.startup;
+ pathnode->path.total_cost += target->cost.startup +
+ target->cost.per_tuple * pathnode->path.rows;
+
+ return pathnode;
+}
+
+/*
+ * create_groupingsets_path
+ * Creates a pathnode that represents performing GROUPING SETS aggregation
+ *
+ * A GroupingSetsPath represents grouping with one or more grouping sets,
+ * implemented by sorting and/or hashing. When any rollup is computed by
+ * sorting, the input path's result must already be sorted to match the
+ * grouping clause of the first non-hashed rollup in 'rollups'.
+ *
+ * 'rel' is the parent relation associated with the result
+ * 'subpath' is the path representing the source of data
+ * 'target' is the PathTarget to be computed
+ * 'having_qual' is the HAVING quals if any
+ * 'rollups' is a list of RollupData nodes
+ * 'agg_costs' contains cost info about the aggregate functions to be computed
+ * 'numGroups' is the estimated total number of groups
+ */
+GroupingSetsPath *
+create_groupingsets_path(PlannerInfo *root,
+ RelOptInfo *rel,
+ Path *subpath,
+ List *having_qual,
+ AggStrategy aggstrategy,
+ List *rollups,
+ const AggClauseCosts *agg_costs,
+ double numGroups)
+{
+ GroupingSetsPath *pathnode = makeNode(GroupingSetsPath);
+ PathTarget *target = rel->reltarget;
+ ListCell *lc;
+ bool is_first = true;
+ bool is_first_sort = true;
+
+ /* The topmost generated Plan node will be an Agg */
+ pathnode->path.pathtype = T_Agg;
+ pathnode->path.parent = rel;
+ pathnode->path.pathtarget = target;
+ pathnode->path.param_info = subpath->param_info;
+ pathnode->path.parallel_aware = false;
+ pathnode->path.parallel_safe = rel->consider_parallel &&
+ subpath->parallel_safe;
+ pathnode->path.parallel_workers = subpath->parallel_workers;
+ pathnode->subpath = subpath;
+
+ /*
+ * Simplify callers by downgrading AGG_SORTED to AGG_PLAIN, and AGG_MIXED
+ * to AGG_HASHED, here if possible.
+ */
+ if (aggstrategy == AGG_SORTED &&
+ list_length(rollups) == 1 &&
+ ((RollupData *) linitial(rollups))->groupClause == NIL)
+ aggstrategy = AGG_PLAIN;
+
+ if (aggstrategy == AGG_MIXED &&
+ list_length(rollups) == 1)
+ aggstrategy = AGG_HASHED;
+
+ /*
+ * Output will be in sorted order by group_pathkeys if, and only if, there
+ * is a single rollup operation on a non-empty list of grouping
+ * expressions.
+ */
+ if (aggstrategy == AGG_SORTED && list_length(rollups) == 1)
+ pathnode->path.pathkeys = root->group_pathkeys;
+ else
+ pathnode->path.pathkeys = NIL;
+
+ pathnode->aggstrategy = aggstrategy;
+ pathnode->rollups = rollups;
+ pathnode->qual = having_qual;
+ pathnode->transitionSpace = agg_costs ? agg_costs->transitionSpace : 0;
+
+ Assert(rollups != NIL);
+ Assert(aggstrategy != AGG_PLAIN || list_length(rollups) == 1);
+ Assert(aggstrategy != AGG_MIXED || list_length(rollups) > 1);
+
+ foreach(lc, rollups)
+ {
+ RollupData *rollup = lfirst(lc);
+ List *gsets = rollup->gsets;
+ int numGroupCols = list_length(linitial(gsets));
+
+ /*
+ * In AGG_SORTED or AGG_PLAIN mode, the first rollup takes the
+ * (already-sorted) input, and following ones do their own sort.
+ *
+ * In AGG_HASHED mode, there is one rollup for each grouping set.
+ *
+ * In AGG_MIXED mode, the first rollups are hashed, the first
+ * non-hashed one takes the (already-sorted) input, and following ones
+ * do their own sort.
+ */
+ if (is_first)
+ {
+ cost_agg(&pathnode->path, root,
+ aggstrategy,
+ agg_costs,
+ numGroupCols,
+ rollup->numGroups,
+ having_qual,
+ subpath->startup_cost,
+ subpath->total_cost,
+ subpath->rows,
+ subpath->pathtarget->width);
+ is_first = false;
+ if (!rollup->is_hashed)
+ is_first_sort = false;
+ }
+ else
+ {
+ Path sort_path; /* dummy for result of cost_sort */
+ Path agg_path; /* dummy for result of cost_agg */
+
+ if (rollup->is_hashed || is_first_sort)
+ {
+ /*
+ * Account for cost of aggregation, but don't charge input
+ * cost again
+ */
+ cost_agg(&agg_path, root,
+ rollup->is_hashed ? AGG_HASHED : AGG_SORTED,
+ agg_costs,
+ numGroupCols,
+ rollup->numGroups,
+ having_qual,
+ 0.0, 0.0,
+ subpath->rows,
+ subpath->pathtarget->width);
+ if (!rollup->is_hashed)
+ is_first_sort = false;
+ }
+ else
+ {
+ /* Account for cost of sort, but don't charge input cost again */
+ cost_sort(&sort_path, root, NIL,
+ 0.0,
+ subpath->rows,
+ subpath->pathtarget->width,
+ 0.0,
+ work_mem,
+ -1.0);
+
+ /* Account for cost of aggregation */
+
+ cost_agg(&agg_path, root,
+ AGG_SORTED,
+ agg_costs,
+ numGroupCols,
+ rollup->numGroups,
+ having_qual,
+ sort_path.startup_cost,
+ sort_path.total_cost,
+ sort_path.rows,
+ subpath->pathtarget->width);
+ }
+
+ pathnode->path.total_cost += agg_path.total_cost;
+ pathnode->path.rows += agg_path.rows;
+ }
+ }
+
+ /* add tlist eval cost for each output row */
+ pathnode->path.startup_cost += target->cost.startup;
+ pathnode->path.total_cost += target->cost.startup +
+ target->cost.per_tuple * pathnode->path.rows;
+
+ return pathnode;
+}
+
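For example (hypothetical query): GROUPING SETS ((a), (b)) planned with AGG_SORTED arrives here as two rollups; the first is costed by cost_agg over the input, which is already sorted by a, while the second is charged its own cost_sort on b (with zero input cost, since that was already counted) plus another cost_agg, and the extra aggregation's rows and total cost are added to the path. A plain ROLLUP (a, b), by contrast, is a single RollupData whose grouping sets are (a, b), (a) and (), so only the first branch of the loop runs.
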
+/*
+ * create_minmaxagg_path
+ * Creates a pathnode that represents computation of MIN/MAX aggregates
+ *
+ * 'rel' is the parent relation associated with the result
+ * 'target' is the PathTarget to be computed
+ * 'mmaggregates' is a list of MinMaxAggInfo structs
+ * 'quals' is the HAVING quals if any
+ */
+MinMaxAggPath *
+create_minmaxagg_path(PlannerInfo *root,
+ RelOptInfo *rel,
+ PathTarget *target,
+ List *mmaggregates,
+ List *quals)
+{
+ MinMaxAggPath *pathnode = makeNode(MinMaxAggPath);
+ Cost initplan_cost;
+ ListCell *lc;
+
+ /* The topmost generated Plan node will be a Result */
+ pathnode->path.pathtype = T_Result;
+ pathnode->path.parent = rel;
+ pathnode->path.pathtarget = target;
+ /* For now, assume we are above any joins, so no parameterization */
+ pathnode->path.param_info = NULL;
+ pathnode->path.parallel_aware = false;
+ /* A MinMaxAggPath implies use of initplans, so cannot be parallel-safe */
+ pathnode->path.parallel_safe = false;
+ pathnode->path.parallel_workers = 0;
+ /* Result is one unordered row */
+ pathnode->path.rows = 1;
+ pathnode->path.pathkeys = NIL;
+
+ pathnode->mmaggregates = mmaggregates;
+ pathnode->quals = quals;
+
+ /* Calculate cost of all the initplans ... */
+ initplan_cost = 0;
+ foreach(lc, mmaggregates)
+ {
+ MinMaxAggInfo *mminfo = (MinMaxAggInfo *) lfirst(lc);
+
+ initplan_cost += mminfo->pathcost;
+ }
+
+ /* add tlist eval cost for each output row, plus cpu_tuple_cost */
+ pathnode->path.startup_cost = initplan_cost + target->cost.startup;
+ pathnode->path.total_cost = initplan_cost + target->cost.startup +
+ target->cost.per_tuple + cpu_tuple_cost;
+
+ /*
+ * Add cost of qual, if any --- but we ignore its selectivity, since our
+ * rowcount estimate should be 1 no matter what the qual is.
+ */
+ if (quals)
+ {
+ QualCost qual_cost;
+
+ cost_qual_eval(&qual_cost, quals, root);
+ pathnode->path.startup_cost += qual_cost.startup;
+ pathnode->path.total_cost += qual_cost.startup + qual_cost.per_tuple;
+ }
+
+ return pathnode;
+}
+
+/*
+ * create_windowagg_path
+ * Creates a pathnode that represents computation of window functions
+ *
+ * 'rel' is the parent relation associated with the result
+ * 'subpath' is the path representing the source of data
+ * 'target' is the PathTarget to be computed
+ * 'windowFuncs' is a list of WindowFunc structs
+ * 'winclause' is a WindowClause that is common to all the WindowFuncs
+ * 'qual' is the list of WindowClause.runconditions from lower-level
+ * WindowAggPaths; must always be NIL when topwindow == false
+ * 'topwindow' is true only for the top-level WindowAgg; false for all
+ * intermediate WindowAggs
+ *
+ * The input must be sorted according to the WindowClause's PARTITION keys
+ * plus ORDER BY keys.
+ */
+WindowAggPath *
+create_windowagg_path(PlannerInfo *root,
+ RelOptInfo *rel,
+ Path *subpath,
+ PathTarget *target,
+ List *windowFuncs,
+ WindowClause *winclause,
+ List *qual,
+ bool topwindow)
+{
+ WindowAggPath *pathnode = makeNode(WindowAggPath);
+
+ /* qual can only be set for the topwindow */
+ Assert(qual == NIL || topwindow);
+
+ pathnode->path.pathtype = T_WindowAgg;
+ pathnode->path.parent = rel;
+ pathnode->path.pathtarget = target;
+ /* For now, assume we are above any joins, so no parameterization */
+ pathnode->path.param_info = NULL;
+ pathnode->path.parallel_aware = false;
+ pathnode->path.parallel_safe = rel->consider_parallel &&
+ subpath->parallel_safe;
+ pathnode->path.parallel_workers = subpath->parallel_workers;
+ /* WindowAgg preserves the input sort order */
+ pathnode->path.pathkeys = subpath->pathkeys;
+
+ pathnode->subpath = subpath;
+ pathnode->winclause = winclause;
+ pathnode->qual = qual;
+ pathnode->topwindow = topwindow;
+
+ /*
+ * For costing purposes, assume that there are no redundant partitioning
+ * or ordering columns; it's not worth the trouble to deal with that
+ * corner case here. So we just pass the unmodified list lengths to
+ * cost_windowagg.
+ */
+ cost_windowagg(&pathnode->path, root,
+ windowFuncs,
+ list_length(winclause->partitionClause),
+ list_length(winclause->orderClause),
+ subpath->startup_cost,
+ subpath->total_cost,
+ subpath->rows);
+
+ /* add tlist eval cost for each output row */
+ pathnode->path.startup_cost += target->cost.startup;
+ pathnode->path.total_cost += target->cost.startup +
+ target->cost.per_tuple * pathnode->path.rows;
+
+ return pathnode;
+}
+
+/*
+ * create_setop_path
+ * Creates a pathnode that represents computation of INTERSECT or EXCEPT
+ *
+ * 'rel' is the parent relation associated with the result
+ * 'subpath' is the path representing the source of data
+ * 'cmd' is the specific semantics (INTERSECT or EXCEPT, with/without ALL)
+ * 'strategy' is the implementation strategy (sorted or hashed)
+ * 'distinctList' is a list of SortGroupClause's representing the grouping
+ * 'flagColIdx' is the column number where the flag column will be, if any
+ * 'firstFlag' is the flag value for the first input relation when hashing;
+ * or -1 when sorting
+ * 'numGroups' is the estimated number of distinct groups
+ * 'outputRows' is the estimated number of output rows
+ */
+SetOpPath *
+create_setop_path(PlannerInfo *root,
+ RelOptInfo *rel,
+ Path *subpath,
+ SetOpCmd cmd,
+ SetOpStrategy strategy,
+ List *distinctList,
+ AttrNumber flagColIdx,
+ int firstFlag,
+ double numGroups,
+ double outputRows)
+{
+ SetOpPath *pathnode = makeNode(SetOpPath);
+
+ pathnode->path.pathtype = T_SetOp;
+ pathnode->path.parent = rel;
+ /* SetOp doesn't project, so use source path's pathtarget */
+ pathnode->path.pathtarget = subpath->pathtarget;
+ /* For now, assume we are above any joins, so no parameterization */
+ pathnode->path.param_info = NULL;
+ pathnode->path.parallel_aware = false;
+ pathnode->path.parallel_safe = rel->consider_parallel &&
+ subpath->parallel_safe;
+ pathnode->path.parallel_workers = subpath->parallel_workers;
+ /* SetOp preserves the input sort order if in sort mode */
+ pathnode->path.pathkeys =
+ (strategy == SETOP_SORTED) ? subpath->pathkeys : NIL;
+
+ pathnode->subpath = subpath;
+ pathnode->cmd = cmd;
+ pathnode->strategy = strategy;
+ pathnode->distinctList = distinctList;
+ pathnode->flagColIdx = flagColIdx;
+ pathnode->firstFlag = firstFlag;
+ pathnode->numGroups = numGroups;
+
+ /*
+ * Charge one cpu_operator_cost per comparison per input tuple. We assume
+	 * all columns get compared for most of the tuples.
+ */
+ pathnode->path.startup_cost = subpath->startup_cost;
+ pathnode->path.total_cost = subpath->total_cost +
+ cpu_operator_cost * subpath->rows * list_length(distinctList);
+ pathnode->path.rows = outputRows;
+
+ return pathnode;
+}
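+
+/*
+ * Illustrative example of the SetOp costing rule above, with hypothetical
+ * numbers: assuming the default cpu_operator_cost of 0.0025, a subpath
+ * producing 10000 rows, and a two-column distinctList, the charge added on
+ * top of the subpath's total cost is 0.0025 * 10000 * 2 = 50.0.
+ */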
+
+/*
+ * create_recursiveunion_path
+ * Creates a pathnode that represents a recursive UNION node
+ *
+ * 'rel' is the parent relation associated with the result
+ * 'leftpath' is the source of data for the non-recursive term
+ * 'rightpath' is the source of data for the recursive term
+ * 'target' is the PathTarget to be computed
+ * 'distinctList' is a list of SortGroupClause's representing the grouping
+ * 'wtParam' is the ID of Param representing work table
+ * 'numGroups' is the estimated number of groups
+ *
+ * For recursive UNION ALL, distinctList is empty and numGroups is zero
+ */
+RecursiveUnionPath *
+create_recursiveunion_path(PlannerInfo *root,
+ RelOptInfo *rel,
+ Path *leftpath,
+ Path *rightpath,
+ PathTarget *target,
+ List *distinctList,
+ int wtParam,
+ double numGroups)
+{
+ RecursiveUnionPath *pathnode = makeNode(RecursiveUnionPath);
+
+ pathnode->path.pathtype = T_RecursiveUnion;
+ pathnode->path.parent = rel;
+ pathnode->path.pathtarget = target;
+ /* For now, assume we are above any joins, so no parameterization */
+ pathnode->path.param_info = NULL;
+ pathnode->path.parallel_aware = false;
+ pathnode->path.parallel_safe = rel->consider_parallel &&
+ leftpath->parallel_safe && rightpath->parallel_safe;
+ /* Foolish, but we'll do it like joins for now: */
+ pathnode->path.parallel_workers = leftpath->parallel_workers;
+ /* RecursiveUnion result is always unsorted */
+ pathnode->path.pathkeys = NIL;
+
+ pathnode->leftpath = leftpath;
+ pathnode->rightpath = rightpath;
+ pathnode->distinctList = distinctList;
+ pathnode->wtParam = wtParam;
+ pathnode->numGroups = numGroups;
+
+ cost_recursive_union(&pathnode->path, leftpath, rightpath);
+
+ return pathnode;
+}
+
+/*
+ * create_lockrows_path
+ * Creates a pathnode that represents acquiring row locks
+ *
+ * 'rel' is the parent relation associated with the result
+ * 'subpath' is the path representing the source of data
+ * 'rowMarks' is a list of PlanRowMark's
+ * 'epqParam' is the ID of Param for EvalPlanQual re-eval
+ */
+LockRowsPath *
+create_lockrows_path(PlannerInfo *root, RelOptInfo *rel,
+ Path *subpath, List *rowMarks, int epqParam)
+{
+ LockRowsPath *pathnode = makeNode(LockRowsPath);
+
+ pathnode->path.pathtype = T_LockRows;
+ pathnode->path.parent = rel;
+ /* LockRows doesn't project, so use source path's pathtarget */
+ pathnode->path.pathtarget = subpath->pathtarget;
+ /* For now, assume we are above any joins, so no parameterization */
+ pathnode->path.param_info = NULL;
+ pathnode->path.parallel_aware = false;
+ pathnode->path.parallel_safe = false;
+ pathnode->path.parallel_workers = 0;
+ pathnode->path.rows = subpath->rows;
+
+ /*
+ * The result cannot be assumed sorted, since locking might cause the sort
+ * key columns to be replaced with new values.
+ */
+ pathnode->path.pathkeys = NIL;
+
+ pathnode->subpath = subpath;
+ pathnode->rowMarks = rowMarks;
+ pathnode->epqParam = epqParam;
+
+ /*
+ * We should charge something extra for the costs of row locking and
+ * possible refetches, but it's hard to say how much. For now, use
+ * cpu_tuple_cost per row.
+ */
+ pathnode->path.startup_cost = subpath->startup_cost;
+ pathnode->path.total_cost = subpath->total_cost +
+ cpu_tuple_cost * subpath->rows;
+
+ return pathnode;
+}
+
+/*
+ * create_modifytable_path
+ * Creates a pathnode that represents performing INSERT/UPDATE/DELETE/MERGE
+ * mods
+ *
+ * 'rel' is the parent relation associated with the result
+ * 'subpath' is a Path producing source data
+ * 'operation' is the operation type
+ * 'canSetTag' is true if we set the command tag/es_processed
+ * 'nominalRelation' is the parent RT index for use of EXPLAIN
+ * 'rootRelation' is the partitioned/inherited table root RTI, or 0 if none
+ * 'partColsUpdated' is true if any partitioning columns are being updated,
+ *		either from the target relation or a descendant partitioned table.
+ * 'resultRelations' is an integer list of actual RT indexes of target rel(s)
+ * 'updateColnosLists' is a list of UPDATE target column number lists
+ * (one sublist per rel); or NIL if not an UPDATE
+ * 'withCheckOptionLists' is a list of WCO lists (one per rel)
+ * 'returningLists' is a list of RETURNING tlists (one per rel)
+ * 'rowMarks' is a list of PlanRowMarks (non-locking only)
+ * 'onconflict' is the ON CONFLICT clause, or NULL
+ * 'epqParam' is the ID of Param for EvalPlanQual re-eval
+ * 'mergeActionLists' is a list of lists of MERGE actions (one per rel)
+ */
+ModifyTablePath *
+create_modifytable_path(PlannerInfo *root, RelOptInfo *rel,
+ Path *subpath,
+ CmdType operation, bool canSetTag,
+ Index nominalRelation, Index rootRelation,
+ bool partColsUpdated,
+ List *resultRelations,
+ List *updateColnosLists,
+ List *withCheckOptionLists, List *returningLists,
+ List *rowMarks, OnConflictExpr *onconflict,
+ List *mergeActionLists, int epqParam)
+{
+ ModifyTablePath *pathnode = makeNode(ModifyTablePath);
+
+ Assert(operation == CMD_MERGE ||
+ (operation == CMD_UPDATE ?
+ list_length(resultRelations) == list_length(updateColnosLists) :
+ updateColnosLists == NIL));
+ Assert(withCheckOptionLists == NIL ||
+ list_length(resultRelations) == list_length(withCheckOptionLists));
+ Assert(returningLists == NIL ||
+ list_length(resultRelations) == list_length(returningLists));
+
+ pathnode->path.pathtype = T_ModifyTable;
+ pathnode->path.parent = rel;
+ /* pathtarget is not interesting, just make it minimally valid */
+ pathnode->path.pathtarget = rel->reltarget;
+ /* For now, assume we are above any joins, so no parameterization */
+ pathnode->path.param_info = NULL;
+ pathnode->path.parallel_aware = false;
+ pathnode->path.parallel_safe = false;
+ pathnode->path.parallel_workers = 0;
+ pathnode->path.pathkeys = NIL;
+
+ /*
+ * Compute cost & rowcount as subpath cost & rowcount (if RETURNING)
+ *
+ * Currently, we don't charge anything extra for the actual table
+ * modification work, nor for the WITH CHECK OPTIONS or RETURNING
+ * expressions if any. It would only be window dressing, since
+ * ModifyTable is always a top-level node and there is no way for the
+ * costs to change any higher-level planning choices. But we might want
+ * to make it look better sometime.
+ */
+ pathnode->path.startup_cost = subpath->startup_cost;
+ pathnode->path.total_cost = subpath->total_cost;
+ if (returningLists != NIL)
+ {
+ pathnode->path.rows = subpath->rows;
+
+ /*
+ * Set width to match the subpath output. XXX this is totally wrong:
+ * we should return an average of the RETURNING tlist widths. But
+ * it's what happened historically, and improving it is a task for
+ * another day. (Again, it's mostly window dressing.)
+ */
+ pathnode->path.pathtarget->width = subpath->pathtarget->width;
+ }
+ else
+ {
+ pathnode->path.rows = 0;
+ pathnode->path.pathtarget->width = 0;
+ }
+
+ pathnode->subpath = subpath;
+ pathnode->operation = operation;
+ pathnode->canSetTag = canSetTag;
+ pathnode->nominalRelation = nominalRelation;
+ pathnode->rootRelation = rootRelation;
+ pathnode->partColsUpdated = partColsUpdated;
+ pathnode->resultRelations = resultRelations;
+ pathnode->updateColnosLists = updateColnosLists;
+ pathnode->withCheckOptionLists = withCheckOptionLists;
+ pathnode->returningLists = returningLists;
+ pathnode->rowMarks = rowMarks;
+ pathnode->onconflict = onconflict;
+ pathnode->epqParam = epqParam;
+ pathnode->mergeActionLists = mergeActionLists;
+
+ return pathnode;
+}
+
+/*
+ * create_limit_path
+ * Creates a pathnode that represents performing LIMIT/OFFSET
+ *
+ * In addition to providing the actual OFFSET and LIMIT expressions,
+ * the caller must provide estimates of their values for costing purposes.
+ * The estimates are as computed by preprocess_limit(), ie, 0 represents
+ * the clause not being present, and -1 means it's present but we could
+ * not estimate its value.
+ *
+ * 'rel' is the parent relation associated with the result
+ * 'subpath' is the path representing the source of data
+ * 'limitOffset' is the actual OFFSET expression, or NULL
+ * 'limitCount' is the actual LIMIT expression, or NULL
+ * 'offset_est' is the estimated value of the OFFSET expression
+ * 'count_est' is the estimated value of the LIMIT expression
+ */
+LimitPath *
+create_limit_path(PlannerInfo *root, RelOptInfo *rel,
+ Path *subpath,
+ Node *limitOffset, Node *limitCount,
+ LimitOption limitOption,
+ int64 offset_est, int64 count_est)
+{
+ LimitPath *pathnode = makeNode(LimitPath);
+
+ pathnode->path.pathtype = T_Limit;
+ pathnode->path.parent = rel;
+ /* Limit doesn't project, so use source path's pathtarget */
+ pathnode->path.pathtarget = subpath->pathtarget;
+ /* For now, assume we are above any joins, so no parameterization */
+ pathnode->path.param_info = NULL;
+ pathnode->path.parallel_aware = false;
+ pathnode->path.parallel_safe = rel->consider_parallel &&
+ subpath->parallel_safe;
+ pathnode->path.parallel_workers = subpath->parallel_workers;
+ pathnode->path.rows = subpath->rows;
+ pathnode->path.startup_cost = subpath->startup_cost;
+ pathnode->path.total_cost = subpath->total_cost;
+ pathnode->path.pathkeys = subpath->pathkeys;
+ pathnode->subpath = subpath;
+ pathnode->limitOffset = limitOffset;
+ pathnode->limitCount = limitCount;
+ pathnode->limitOption = limitOption;
+
+ /*
+ * Adjust the output rows count and costs according to the offset/limit.
+ */
+ adjust_limit_rows_costs(&pathnode->path.rows,
+ &pathnode->path.startup_cost,
+ &pathnode->path.total_cost,
+ offset_est, count_est);
+
+ return pathnode;
+}
+
+/*
+ * adjust_limit_rows_costs
+ * Adjust the size and cost estimates for a LimitPath node according to the
+ * offset/limit.
+ *
+ * This is only a cosmetic issue if we are at top level, but if we are
+ * building a subquery then it's important to report correct info to the outer
+ * planner.
+ *
+ * When the offset or count couldn't be estimated, use 10% of the estimated
+ * number of rows emitted from the subpath.
+ *
+ * XXX we don't bother to add eval costs of the offset/limit expressions
+ * themselves to the path costs. In theory we should, but in most cases those
+ * expressions are trivial and it's just not worth the trouble.
+ */
+void
+adjust_limit_rows_costs(double *rows, /* in/out parameter */
+ Cost *startup_cost, /* in/out parameter */
+ Cost *total_cost, /* in/out parameter */
+ int64 offset_est,
+ int64 count_est)
+{
+ double input_rows = *rows;
+ Cost input_startup_cost = *startup_cost;
+ Cost input_total_cost = *total_cost;
+
+ if (offset_est != 0)
+ {
+ double offset_rows;
+
+ if (offset_est > 0)
+ offset_rows = (double) offset_est;
+ else
+ offset_rows = clamp_row_est(input_rows * 0.10);
+ if (offset_rows > *rows)
+ offset_rows = *rows;
+ if (input_rows > 0)
+ *startup_cost +=
+ (input_total_cost - input_startup_cost)
+ * offset_rows / input_rows;
+ *rows -= offset_rows;
+ if (*rows < 1)
+ *rows = 1;
+ }
+
+ if (count_est != 0)
+ {
+ double count_rows;
+
+ if (count_est > 0)
+ count_rows = (double) count_est;
+ else
+ count_rows = clamp_row_est(input_rows * 0.10);
+ if (count_rows > *rows)
+ count_rows = *rows;
+ if (input_rows > 0)
+ *total_cost = *startup_cost +
+ (input_total_cost - input_startup_cost)
+ * count_rows / input_rows;
+ *rows = count_rows;
+ if (*rows < 1)
+ *rows = 1;
+ }
+}
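+
+/*
+ * Illustrative example of the adjustment above, with hypothetical inputs:
+ * suppose the subpath has rows = 1000, startup_cost = 0 and total_cost = 100,
+ * with OFFSET 100 and LIMIT 10 (offset_est = 100, count_est = 10).  The
+ * OFFSET step charges the fraction of run cost needed to skip 100 of the 1000
+ * rows, giving startup_cost = 0 + 100 * (100/1000) = 10 and rows = 900.  The
+ * LIMIT step then sets total_cost = 10 + 100 * (10/1000) = 11 and rows = 10.
+ */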
+
+
+/*
+ * reparameterize_path
+ * Attempt to modify a Path to have greater parameterization
+ *
+ * We use this to attempt to bring all child paths of an appendrel to the
+ * same parameterization level, ensuring that they all enforce the same set
+ * of join quals (and thus that that parameterization can be attributed to
+ * an append path built from such paths). Currently, only a few path types
+ * are supported here, though more could be added at need. We return NULL
+ * if we can't reparameterize the given path.
+ *
+ * Note: we intentionally do not pass created paths to add_path(); it would
+ * possibly try to delete them on the grounds of being cost-inferior to the
+ * paths they were made from, and we don't want that. Paths made here are
+ * not necessarily of general-purpose usefulness, but they can be useful
+ * as members of an append path.
+ */
+Path *
+reparameterize_path(PlannerInfo *root, Path *path,
+ Relids required_outer,
+ double loop_count)
+{
+ RelOptInfo *rel = path->parent;
+
+ /* Can only increase, not decrease, path's parameterization */
+ if (!bms_is_subset(PATH_REQ_OUTER(path), required_outer))
+ return NULL;
+ switch (path->pathtype)
+ {
+ case T_SeqScan:
+ return create_seqscan_path(root, rel, required_outer, 0);
+ case T_SampleScan:
+ return (Path *) create_samplescan_path(root, rel, required_outer);
+ case T_IndexScan:
+ case T_IndexOnlyScan:
+ {
+ IndexPath *ipath = (IndexPath *) path;
+ IndexPath *newpath = makeNode(IndexPath);
+
+ /*
+ * We can't use create_index_path directly, and would not want
+				 * to because it would re-compute the indexqual conditions,
+ * which is wasted effort. Instead we hack things a bit:
+ * flat-copy the path node, revise its param_info, and redo
+ * the cost estimate.
+ */
+ memcpy(newpath, ipath, sizeof(IndexPath));
+ newpath->path.param_info =
+ get_baserel_parampathinfo(root, rel, required_outer);
+ cost_index(newpath, root, loop_count, false);
+ return (Path *) newpath;
+ }
+ case T_BitmapHeapScan:
+ {
+ BitmapHeapPath *bpath = (BitmapHeapPath *) path;
+
+ return (Path *) create_bitmap_heap_path(root,
+ rel,
+ bpath->bitmapqual,
+ required_outer,
+ loop_count, 0);
+ }
+ case T_SubqueryScan:
+ {
+ SubqueryScanPath *spath = (SubqueryScanPath *) path;
+
+ return (Path *) create_subqueryscan_path(root,
+ rel,
+ spath->subpath,
+ spath->path.pathkeys,
+ required_outer);
+ }
+ case T_Result:
+ /* Supported only for RTE_RESULT scan paths */
+ if (IsA(path, Path))
+ return create_resultscan_path(root, rel, required_outer);
+ break;
+ case T_Append:
+ {
+ AppendPath *apath = (AppendPath *) path;
+ List *childpaths = NIL;
+ List *partialpaths = NIL;
+ int i;
+ ListCell *lc;
+
+ /* Reparameterize the children */
+ i = 0;
+ foreach(lc, apath->subpaths)
+ {
+ Path *spath = (Path *) lfirst(lc);
+
+ spath = reparameterize_path(root, spath,
+ required_outer,
+ loop_count);
+ if (spath == NULL)
+ return NULL;
+ /* We have to re-split the regular and partial paths */
+ if (i < apath->first_partial_path)
+ childpaths = lappend(childpaths, spath);
+ else
+ partialpaths = lappend(partialpaths, spath);
+ i++;
+ }
+ return (Path *)
+ create_append_path(root, rel, childpaths, partialpaths,
+ apath->path.pathkeys, required_outer,
+ apath->path.parallel_workers,
+ apath->path.parallel_aware,
+ -1);
+ }
+ case T_Memoize:
+ {
+ MemoizePath *mpath = (MemoizePath *) path;
+ Path *spath = mpath->subpath;
+
+ spath = reparameterize_path(root, spath,
+ required_outer,
+ loop_count);
+ if (spath == NULL)
+ return NULL;
+ return (Path *) create_memoize_path(root, rel,
+ spath,
+ mpath->param_exprs,
+ mpath->hash_operators,
+ mpath->singlerow,
+ mpath->binary_mode,
+ mpath->calls);
+ }
+ default:
+ break;
+ }
+ return NULL;
+}
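+
+/*
+ * As a hypothetical example of the above: if one child of an appendrel has a
+ * cheapest path parameterized by relation B while another child's cheapest
+ * path is unparameterized, reparameterize_path() can be applied to the latter
+ * to obtain a version that also enforces B's join quals, so that an Append
+ * path built over both children can be treated as parameterized by B.
+ */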
+
+/*
+ * reparameterize_path_by_child
+ * Given a path parameterized by the parent of the given child relation,
+ * translate the path to be parameterized by the given child relation.
+ *
+ * The function creates a new path of the same type as the given path, but
+ * parameterized by the given child relation. Most fields from the original
+ * path can simply be flat-copied, but any expressions must be adjusted to
+ * refer to the correct varnos, and any paths must be recursively
+ * reparameterized. Other fields that refer to specific relids also need
+ * adjustment.
+ *
+ * The cost, number of rows, width and parallel path properties depend upon
+ * path->parent, which does not change during the translation. Hence those
+ * members are copied as they are.
+ *
+ * If the given path cannot be reparameterized, the function returns NULL.
+ */
+Path *
+reparameterize_path_by_child(PlannerInfo *root, Path *path,
+ RelOptInfo *child_rel)
+{
+
+#define FLAT_COPY_PATH(newnode, node, nodetype) \
+ ( (newnode) = makeNode(nodetype), \
+ memcpy((newnode), (node), sizeof(nodetype)) )
+
+#define ADJUST_CHILD_ATTRS(node) \
+ ((node) = \
+ (List *) adjust_appendrel_attrs_multilevel(root, (Node *) (node), \
+ child_rel->relids, \
+ child_rel->top_parent_relids))
+
+#define REPARAMETERIZE_CHILD_PATH(path) \
+do { \
+ (path) = reparameterize_path_by_child(root, (path), child_rel); \
+ if ((path) == NULL) \
+ return NULL; \
+} while(0)
+
+#define REPARAMETERIZE_CHILD_PATH_LIST(pathlist) \
+do { \
+ if ((pathlist) != NIL) \
+ { \
+ (pathlist) = reparameterize_pathlist_by_child(root, (pathlist), \
+ child_rel); \
+ if ((pathlist) == NIL) \
+ return NULL; \
+ } \
+} while(0)
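+
+/*
+ * For illustration, REPARAMETERIZE_CHILD_PATH(bhpath->bitmapqual) expands
+ * (roughly) to
+ *
+ *     bhpath->bitmapqual = reparameterize_path_by_child(root,
+ *                                                       bhpath->bitmapqual,
+ *                                                       child_rel);
+ *     if (bhpath->bitmapqual == NULL)
+ *         return NULL;
+ *
+ * i.e. each child path is translated recursively, and failure to translate
+ * any one of them aborts the whole translation.
+ */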
+
+ Path *new_path;
+ ParamPathInfo *new_ppi;
+ ParamPathInfo *old_ppi;
+ Relids required_outer;
+
+ /*
+	 * If the path is not parameterized by the parent of the given relation, it
+ * doesn't need reparameterization.
+ */
+ if (!path->param_info ||
+ !bms_overlap(PATH_REQ_OUTER(path), child_rel->top_parent_relids))
+ return path;
+
+ /*
+ * If possible, reparameterize the given path, making a copy.
+ *
+ * This function is currently only applied to the inner side of a nestloop
+ * join that is being partitioned by the partitionwise-join code. Hence,
+ * we need only support path types that plausibly arise in that context.
+ * (In particular, supporting sorted path types would be a waste of code
+ * and cycles: even if we translated them here, they'd just lose in
+ * subsequent cost comparisons.) If we do see an unsupported path type,
+ * that just means we won't be able to generate a partitionwise-join plan
+ * using that path type.
+ */
+ switch (nodeTag(path))
+ {
+ case T_Path:
+ FLAT_COPY_PATH(new_path, path, Path);
+ break;
+
+ case T_IndexPath:
+ {
+ IndexPath *ipath;
+
+ FLAT_COPY_PATH(ipath, path, IndexPath);
+ ADJUST_CHILD_ATTRS(ipath->indexclauses);
+ new_path = (Path *) ipath;
+ }
+ break;
+
+ case T_BitmapHeapPath:
+ {
+ BitmapHeapPath *bhpath;
+
+ FLAT_COPY_PATH(bhpath, path, BitmapHeapPath);
+ REPARAMETERIZE_CHILD_PATH(bhpath->bitmapqual);
+ new_path = (Path *) bhpath;
+ }
+ break;
+
+ case T_BitmapAndPath:
+ {
+ BitmapAndPath *bapath;
+
+ FLAT_COPY_PATH(bapath, path, BitmapAndPath);
+ REPARAMETERIZE_CHILD_PATH_LIST(bapath->bitmapquals);
+ new_path = (Path *) bapath;
+ }
+ break;
+
+ case T_BitmapOrPath:
+ {
+ BitmapOrPath *bopath;
+
+ FLAT_COPY_PATH(bopath, path, BitmapOrPath);
+ REPARAMETERIZE_CHILD_PATH_LIST(bopath->bitmapquals);
+ new_path = (Path *) bopath;
+ }
+ break;
+
+ case T_ForeignPath:
+ {
+ ForeignPath *fpath;
+ ReparameterizeForeignPathByChild_function rfpc_func;
+
+ FLAT_COPY_PATH(fpath, path, ForeignPath);
+ if (fpath->fdw_outerpath)
+ REPARAMETERIZE_CHILD_PATH(fpath->fdw_outerpath);
+
+ /* Hand over to FDW if needed. */
+ rfpc_func =
+ path->parent->fdwroutine->ReparameterizeForeignPathByChild;
+ if (rfpc_func)
+ fpath->fdw_private = rfpc_func(root, fpath->fdw_private,
+ child_rel);
+ new_path = (Path *) fpath;
+ }
+ break;
+
+ case T_CustomPath:
+ {
+ CustomPath *cpath;
+
+ FLAT_COPY_PATH(cpath, path, CustomPath);
+ REPARAMETERIZE_CHILD_PATH_LIST(cpath->custom_paths);
+ if (cpath->methods &&
+ cpath->methods->ReparameterizeCustomPathByChild)
+ cpath->custom_private =
+ cpath->methods->ReparameterizeCustomPathByChild(root,
+ cpath->custom_private,
+ child_rel);
+ new_path = (Path *) cpath;
+ }
+ break;
+
+ case T_NestPath:
+ {
+ JoinPath *jpath;
+ NestPath *npath;
+
+ FLAT_COPY_PATH(npath, path, NestPath);
+
+ jpath = (JoinPath *) npath;
+ REPARAMETERIZE_CHILD_PATH(jpath->outerjoinpath);
+ REPARAMETERIZE_CHILD_PATH(jpath->innerjoinpath);
+ ADJUST_CHILD_ATTRS(jpath->joinrestrictinfo);
+ new_path = (Path *) npath;
+ }
+ break;
+
+ case T_MergePath:
+ {
+ JoinPath *jpath;
+ MergePath *mpath;
+
+ FLAT_COPY_PATH(mpath, path, MergePath);
+
+ jpath = (JoinPath *) mpath;
+ REPARAMETERIZE_CHILD_PATH(jpath->outerjoinpath);
+ REPARAMETERIZE_CHILD_PATH(jpath->innerjoinpath);
+ ADJUST_CHILD_ATTRS(jpath->joinrestrictinfo);
+ ADJUST_CHILD_ATTRS(mpath->path_mergeclauses);
+ new_path = (Path *) mpath;
+ }
+ break;
+
+ case T_HashPath:
+ {
+ JoinPath *jpath;
+ HashPath *hpath;
+
+ FLAT_COPY_PATH(hpath, path, HashPath);
+
+ jpath = (JoinPath *) hpath;
+ REPARAMETERIZE_CHILD_PATH(jpath->outerjoinpath);
+ REPARAMETERIZE_CHILD_PATH(jpath->innerjoinpath);
+ ADJUST_CHILD_ATTRS(jpath->joinrestrictinfo);
+ ADJUST_CHILD_ATTRS(hpath->path_hashclauses);
+ new_path = (Path *) hpath;
+ }
+ break;
+
+ case T_AppendPath:
+ {
+ AppendPath *apath;
+
+ FLAT_COPY_PATH(apath, path, AppendPath);
+ REPARAMETERIZE_CHILD_PATH_LIST(apath->subpaths);
+ new_path = (Path *) apath;
+ }
+ break;
+
+ case T_MemoizePath:
+ {
+ MemoizePath *mpath;
+
+ FLAT_COPY_PATH(mpath, path, MemoizePath);
+ REPARAMETERIZE_CHILD_PATH(mpath->subpath);
+ ADJUST_CHILD_ATTRS(mpath->param_exprs);
+ new_path = (Path *) mpath;
+ }
+ break;
+
+ case T_GatherPath:
+ {
+ GatherPath *gpath;
+
+ FLAT_COPY_PATH(gpath, path, GatherPath);
+ REPARAMETERIZE_CHILD_PATH(gpath->subpath);
+ new_path = (Path *) gpath;
+ }
+ break;
+
+ default:
+
+ /* We don't know how to reparameterize this path. */
+ return NULL;
+ }
+
+ /*
+ * Adjust the parameterization information, which refers to the topmost
+ * parent. The topmost parent can be multiple levels away from the given
+ * child, hence use multi-level expression adjustment routines.
+ */
+ old_ppi = new_path->param_info;
+ required_outer =
+ adjust_child_relids_multilevel(root, old_ppi->ppi_req_outer,
+ child_rel->relids,
+ child_rel->top_parent_relids);
+
+ /* If we already have a PPI for this parameterization, just return it */
+ new_ppi = find_param_path_info(new_path->parent, required_outer);
+
+ /*
+ * If not, build a new one and link it to the list of PPIs. For the same
+ * reason as explained in mark_dummy_rel(), allocate new PPI in the same
+ * context the given RelOptInfo is in.
+ */
+ if (new_ppi == NULL)
+ {
+ MemoryContext oldcontext;
+ RelOptInfo *rel = path->parent;
+
+ oldcontext = MemoryContextSwitchTo(GetMemoryChunkContext(rel));
+
+ new_ppi = makeNode(ParamPathInfo);
+ new_ppi->ppi_req_outer = bms_copy(required_outer);
+ new_ppi->ppi_rows = old_ppi->ppi_rows;
+ new_ppi->ppi_clauses = old_ppi->ppi_clauses;
+ ADJUST_CHILD_ATTRS(new_ppi->ppi_clauses);
+ rel->ppilist = lappend(rel->ppilist, new_ppi);
+
+ MemoryContextSwitchTo(oldcontext);
+ }
+ bms_free(required_outer);
+
+ new_path->param_info = new_ppi;
+
+ /*
+ * Adjust the path target if the parent of the outer relation is
+ * referenced in the targetlist. This can happen when only the parent of
+	 * the outer relation is laterally referenced in this relation.
+ */
+ if (bms_overlap(path->parent->lateral_relids,
+ child_rel->top_parent_relids))
+ {
+ new_path->pathtarget = copy_pathtarget(new_path->pathtarget);
+ ADJUST_CHILD_ATTRS(new_path->pathtarget->exprs);
+ }
+
+ return new_path;
+}
+
+/*
+ * reparameterize_pathlist_by_child
+ * Helper function to reparameterize a list of paths by given child rel.
+ */
+static List *
+reparameterize_pathlist_by_child(PlannerInfo *root,
+ List *pathlist,
+ RelOptInfo *child_rel)
+{
+ ListCell *lc;
+ List *result = NIL;
+
+ foreach(lc, pathlist)
+ {
+ Path *path = reparameterize_path_by_child(root, lfirst(lc),
+ child_rel);
+
+ if (path == NULL)
+ {
+ list_free(result);
+ return NIL;
+ }
+
+ result = lappend(result, path);
+ }
+
+ return result;
+}
diff --git a/src/backend/optimizer/util/placeholder.c b/src/backend/optimizer/util/placeholder.c
new file mode 100644
index 0000000..3b0f058
--- /dev/null
+++ b/src/backend/optimizer/util/placeholder.c
@@ -0,0 +1,477 @@
+/*-------------------------------------------------------------------------
+ *
+ * placeholder.c
+ * PlaceHolderVar and PlaceHolderInfo manipulation routines
+ *
+ *
+ * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ * src/backend/optimizer/util/placeholder.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "nodes/nodeFuncs.h"
+#include "optimizer/cost.h"
+#include "optimizer/optimizer.h"
+#include "optimizer/pathnode.h"
+#include "optimizer/placeholder.h"
+#include "optimizer/planmain.h"
+#include "utils/lsyscache.h"
+
+/* Local functions */
+static void find_placeholders_recurse(PlannerInfo *root, Node *jtnode);
+static void find_placeholders_in_expr(PlannerInfo *root, Node *expr);
+
+
+/*
+ * make_placeholder_expr
+ * Make a PlaceHolderVar for the given expression.
+ *
+ * phrels is the syntactic location (as a set of baserels) to attribute
+ * to the expression.
+ */
+PlaceHolderVar *
+make_placeholder_expr(PlannerInfo *root, Expr *expr, Relids phrels)
+{
+ PlaceHolderVar *phv = makeNode(PlaceHolderVar);
+
+ phv->phexpr = expr;
+ phv->phrels = phrels;
+ phv->phid = ++(root->glob->lastPHId);
+ phv->phlevelsup = 0;
+
+ return phv;
+}
+
+/*
+ * find_placeholder_info
+ * Fetch the PlaceHolderInfo for the given PHV
+ *
+ * If the PlaceHolderInfo doesn't exist yet, create it if create_new_ph is
+ * true, else throw an error.
+ *
+ * This is separate from make_placeholder_expr because subquery pullup has
+ * to make PlaceHolderVars for expressions that might not be used at all in
+ * the upper query, or might not remain after const-expression simplification.
+ * We build PlaceHolderInfos only for PHVs that are still present in the
+ * simplified query passed to query_planner().
+ *
+ * Note: this should only be called after query_planner() has started. Also,
+ * create_new_ph must not be true after deconstruct_jointree begins, because
+ * make_outerjoininfo assumes that we already know about all placeholders.
+ */
+PlaceHolderInfo *
+find_placeholder_info(PlannerInfo *root, PlaceHolderVar *phv,
+ bool create_new_ph)
+{
+ PlaceHolderInfo *phinfo;
+ Relids rels_used;
+ ListCell *lc;
+
+ /* if this ever isn't true, we'd need to be able to look in parent lists */
+ Assert(phv->phlevelsup == 0);
+
+ foreach(lc, root->placeholder_list)
+ {
+ phinfo = (PlaceHolderInfo *) lfirst(lc);
+ if (phinfo->phid == phv->phid)
+ return phinfo;
+ }
+
+ /* Not found, so create it */
+ if (!create_new_ph)
+ elog(ERROR, "too late to create a new PlaceHolderInfo");
+
+ phinfo = makeNode(PlaceHolderInfo);
+
+ phinfo->phid = phv->phid;
+ phinfo->ph_var = copyObject(phv);
+
+ /*
+ * Any referenced rels that are outside the PHV's syntactic scope are
+ * LATERAL references, which should be included in ph_lateral but not in
+ * ph_eval_at. If no referenced rels are within the syntactic scope,
+ * force evaluation at the syntactic location.
+ */
+ rels_used = pull_varnos(root, (Node *) phv->phexpr);
+ phinfo->ph_lateral = bms_difference(rels_used, phv->phrels);
+ if (bms_is_empty(phinfo->ph_lateral))
+ phinfo->ph_lateral = NULL; /* make it exactly NULL if empty */
+ phinfo->ph_eval_at = bms_int_members(rels_used, phv->phrels);
+ /* If no contained vars, force evaluation at syntactic location */
+ if (bms_is_empty(phinfo->ph_eval_at))
+ {
+ phinfo->ph_eval_at = bms_copy(phv->phrels);
+ Assert(!bms_is_empty(phinfo->ph_eval_at));
+ }
+ /* ph_eval_at may change later, see update_placeholder_eval_levels */
+ phinfo->ph_needed = NULL; /* initially it's unused */
+ /* for the moment, estimate width using just the datatype info */
+ phinfo->ph_width = get_typavgwidth(exprType((Node *) phv->phexpr),
+ exprTypmod((Node *) phv->phexpr));
+
+ root->placeholder_list = lappend(root->placeholder_list, phinfo);
+
+ /*
+ * The PHV's contained expression may contain other, lower-level PHVs. We
+ * now know we need to get those into the PlaceHolderInfo list, too, so we
+ * may as well do that immediately.
+ */
+ find_placeholders_in_expr(root, (Node *) phinfo->ph_var->phexpr);
+
+ return phinfo;
+}
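+
+/*
+ * A hypothetical example of the ph_lateral/ph_eval_at computation above: if a
+ * PlaceHolderVar's expression references relations {1, 3} while its syntactic
+ * scope (phrels) is {1, 2}, then relation 3 is a LATERAL reference, giving
+ * ph_lateral = {3} and ph_eval_at = {1}.  If the expression referenced no
+ * relation within phrels at all, ph_eval_at would instead be forced to the
+ * full syntactic scope {1, 2}.
+ */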
+
+/*
+ * find_placeholders_in_jointree
+ * Search the jointree for PlaceHolderVars, and build PlaceHolderInfos
+ *
+ * We don't need to look at the targetlist because build_base_rel_tlists()
+ * will already have made entries for any PHVs in the tlist.
+ *
+ * This is called before we begin deconstruct_jointree. Once we begin
+ * deconstruct_jointree, all active placeholders must be present in
+ * root->placeholder_list, because make_outerjoininfo and
+ * update_placeholder_eval_levels require this info to be available
+ * while we crawl up the join tree.
+ */
+void
+find_placeholders_in_jointree(PlannerInfo *root)
+{
+ /* We need do nothing if the query contains no PlaceHolderVars */
+ if (root->glob->lastPHId != 0)
+ {
+ /* Start recursion at top of jointree */
+ Assert(root->parse->jointree != NULL &&
+ IsA(root->parse->jointree, FromExpr));
+ find_placeholders_recurse(root, (Node *) root->parse->jointree);
+ }
+}
+
+/*
+ * find_placeholders_recurse
+ * One recursion level of find_placeholders_in_jointree.
+ *
+ * jtnode is the current jointree node to examine.
+ */
+static void
+find_placeholders_recurse(PlannerInfo *root, Node *jtnode)
+{
+ if (jtnode == NULL)
+ return;
+ if (IsA(jtnode, RangeTblRef))
+ {
+ /* No quals to deal with here */
+ }
+ else if (IsA(jtnode, FromExpr))
+ {
+ FromExpr *f = (FromExpr *) jtnode;
+ ListCell *l;
+
+ /*
+ * First, recurse to handle child joins.
+ */
+ foreach(l, f->fromlist)
+ {
+ find_placeholders_recurse(root, lfirst(l));
+ }
+
+ /*
+ * Now process the top-level quals.
+ */
+ find_placeholders_in_expr(root, f->quals);
+ }
+ else if (IsA(jtnode, JoinExpr))
+ {
+ JoinExpr *j = (JoinExpr *) jtnode;
+
+ /*
+ * First, recurse to handle child joins.
+ */
+ find_placeholders_recurse(root, j->larg);
+ find_placeholders_recurse(root, j->rarg);
+
+ /* Process the qual clauses */
+ find_placeholders_in_expr(root, j->quals);
+ }
+ else
+ elog(ERROR, "unrecognized node type: %d",
+ (int) nodeTag(jtnode));
+}
+
+/*
+ * find_placeholders_in_expr
+ * Find all PlaceHolderVars in the given expression, and create
+ * PlaceHolderInfo entries for them.
+ */
+static void
+find_placeholders_in_expr(PlannerInfo *root, Node *expr)
+{
+ List *vars;
+ ListCell *vl;
+
+ /*
+ * pull_var_clause does more than we need here, but it'll do and it's
+ * convenient to use.
+ */
+ vars = pull_var_clause(expr,
+ PVC_RECURSE_AGGREGATES |
+ PVC_RECURSE_WINDOWFUNCS |
+ PVC_INCLUDE_PLACEHOLDERS);
+ foreach(vl, vars)
+ {
+ PlaceHolderVar *phv = (PlaceHolderVar *) lfirst(vl);
+
+ /* Ignore any plain Vars */
+ if (!IsA(phv, PlaceHolderVar))
+ continue;
+
+ /* Create a PlaceHolderInfo entry if there's not one already */
+ (void) find_placeholder_info(root, phv, true);
+ }
+ list_free(vars);
+}
+
+/*
+ * update_placeholder_eval_levels
+ * Adjust the target evaluation levels for placeholders
+ *
+ * The initial eval_at level set by find_placeholder_info was the set of
+ * rels used in the placeholder's expression (or the whole subselect below
+ * the placeholder's syntactic location, if the expr is variable-free).
+ * If the query contains any outer joins that can null any of those rels,
+ * we must delay evaluation to above those joins.
+ *
+ * We repeat this operation each time we add another outer join to
+ * root->join_info_list. It's somewhat annoying to have to do that, but
+ * since we don't have very much information on the placeholders' locations,
+ * it's hard to avoid. Each placeholder's eval_at level must be correct
+ * by the time it starts to figure in outer-join delay decisions for higher
+ * outer joins.
+ *
+ * In future we might want to put additional policy/heuristics here to
+ * try to determine an optimal evaluation level. The current rules will
+ * result in evaluation at the lowest possible level. However, pushing a
+ * placeholder eval up the tree is likely to further constrain evaluation
+ * order for outer joins, so it could easily be counterproductive; and we
+ * don't have enough information at this point to make an intelligent choice.
+ */
+void
+update_placeholder_eval_levels(PlannerInfo *root, SpecialJoinInfo *new_sjinfo)
+{
+ ListCell *lc1;
+
+ foreach(lc1, root->placeholder_list)
+ {
+ PlaceHolderInfo *phinfo = (PlaceHolderInfo *) lfirst(lc1);
+ Relids syn_level = phinfo->ph_var->phrels;
+ Relids eval_at;
+ bool found_some;
+ ListCell *lc2;
+
+ /*
+ * We don't need to do any work on this placeholder unless the
+ * newly-added outer join is syntactically beneath its location.
+ */
+ if (!bms_is_subset(new_sjinfo->syn_lefthand, syn_level) ||
+ !bms_is_subset(new_sjinfo->syn_righthand, syn_level))
+ continue;
+
+ /*
+ * Check for delays due to lower outer joins. This is the same logic
+ * as in check_outerjoin_delay in initsplan.c, except that we don't
+ * have anything to do with the delay_upper_joins flags; delay of
+ * upper outer joins will be handled later, based on the eval_at
+ * values we compute now.
+ */
+ eval_at = phinfo->ph_eval_at;
+
+ do
+ {
+ found_some = false;
+ foreach(lc2, root->join_info_list)
+ {
+ SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) lfirst(lc2);
+
+ /* disregard joins not within the PHV's sub-select */
+ if (!bms_is_subset(sjinfo->syn_lefthand, syn_level) ||
+ !bms_is_subset(sjinfo->syn_righthand, syn_level))
+ continue;
+
+ /* do we reference any nullable rels of this OJ? */
+ if (bms_overlap(eval_at, sjinfo->min_righthand) ||
+ (sjinfo->jointype == JOIN_FULL &&
+ bms_overlap(eval_at, sjinfo->min_lefthand)))
+ {
+ /* yes; have we included all its rels in eval_at? */
+ if (!bms_is_subset(sjinfo->min_lefthand, eval_at) ||
+ !bms_is_subset(sjinfo->min_righthand, eval_at))
+ {
+ /* no, so add them in */
+ eval_at = bms_add_members(eval_at,
+ sjinfo->min_lefthand);
+ eval_at = bms_add_members(eval_at,
+ sjinfo->min_righthand);
+ /* we'll need another iteration */
+ found_some = true;
+ }
+ }
+ }
+ } while (found_some);
+
+ /* Can't move the PHV's eval_at level to above its syntactic level */
+ Assert(bms_is_subset(eval_at, syn_level));
+
+ phinfo->ph_eval_at = eval_at;
+ }
+}
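+
+/*
+ * A hypothetical example of the eval_at adjustment above: suppose a
+ * placeholder starts with ph_eval_at = {4}, and join_info_list contains a
+ * left join (within the placeholder's syntactic scope) whose min_lefthand is
+ * {2} and min_righthand is {4}.  Since the placeholder references a rel on
+ * the nullable side of that join, eval_at is expanded to {2, 4}, delaying
+ * evaluation until above the outer join; the loop then repeats in case the
+ * enlarged set now overlaps some other outer join.
+ */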
+
+/*
+ * fix_placeholder_input_needed_levels
+ * Adjust the "needed at" levels for placeholder inputs
+ *
+ * This is called after we've finished determining the eval_at levels for
+ * all placeholders. We need to make sure that all vars and placeholders
+ * needed to evaluate each placeholder will be available at the scan or join
+ * level where the evaluation will be done. (It might seem that scan-level
+ * evaluations aren't interesting, but that's not so: a LATERAL reference
+ * within a placeholder's expression needs to cause the referenced var or
+ * placeholder to be marked as needed in the scan where it's evaluated.)
+ * Note that this loop can have side-effects on the ph_needed sets of other
+ * PlaceHolderInfos; that's okay because we don't examine ph_needed here, so
+ * there are no ordering issues to worry about.
+ */
+void
+fix_placeholder_input_needed_levels(PlannerInfo *root)
+{
+ ListCell *lc;
+
+ foreach(lc, root->placeholder_list)
+ {
+ PlaceHolderInfo *phinfo = (PlaceHolderInfo *) lfirst(lc);
+ List *vars = pull_var_clause((Node *) phinfo->ph_var->phexpr,
+ PVC_RECURSE_AGGREGATES |
+ PVC_RECURSE_WINDOWFUNCS |
+ PVC_INCLUDE_PLACEHOLDERS);
+
+ add_vars_to_targetlist(root, vars, phinfo->ph_eval_at, false);
+ list_free(vars);
+ }
+}
+
+/*
+ * add_placeholders_to_base_rels
+ * Add any required PlaceHolderVars to base rels' targetlists.
+ *
+ * If any placeholder can be computed at a base rel and is needed above it,
+ * add it to that rel's targetlist. This might look like it could be merged
+ * with fix_placeholder_input_needed_levels, but it must be separate because
+ * join removal happens in between, and can change the ph_eval_at sets. There
+ * is essentially the same logic in add_placeholders_to_joinrel, but we can't
+ * do that part until joinrels are formed.
+ */
+void
+add_placeholders_to_base_rels(PlannerInfo *root)
+{
+ ListCell *lc;
+
+ foreach(lc, root->placeholder_list)
+ {
+ PlaceHolderInfo *phinfo = (PlaceHolderInfo *) lfirst(lc);
+ Relids eval_at = phinfo->ph_eval_at;
+ int varno;
+
+ if (bms_get_singleton_member(eval_at, &varno) &&
+ bms_nonempty_difference(phinfo->ph_needed, eval_at))
+ {
+ RelOptInfo *rel = find_base_rel(root, varno);
+
+ rel->reltarget->exprs = lappend(rel->reltarget->exprs,
+ copyObject(phinfo->ph_var));
+ /* reltarget's cost and width fields will be updated later */
+ }
+ }
+}
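+
+/*
+ * Hypothetically, if a placeholder's ph_eval_at is the singleton {2} and its
+ * ph_needed set also contains relation 5, the PlaceHolderVar is appended to
+ * base rel 2's reltarget so that it is available to joins above the scan.  If
+ * ph_needed were a subset of {2}, nothing above the scan needs it, and it is
+ * not added here.
+ */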
+
+/*
+ * add_placeholders_to_joinrel
+ * Add any required PlaceHolderVars to a join rel's targetlist;
+ * and if they contain lateral references, add those references to the
+ * joinrel's direct_lateral_relids.
+ *
+ * A join rel should emit a PlaceHolderVar if (a) the PHV can be computed
+ * at or below this join level and (b) the PHV is needed above this level.
+ * However, condition (a) is sufficient to add to direct_lateral_relids,
+ * as explained below.
+ */
+void
+add_placeholders_to_joinrel(PlannerInfo *root, RelOptInfo *joinrel,
+ RelOptInfo *outer_rel, RelOptInfo *inner_rel)
+{
+ Relids relids = joinrel->relids;
+ ListCell *lc;
+
+ foreach(lc, root->placeholder_list)
+ {
+ PlaceHolderInfo *phinfo = (PlaceHolderInfo *) lfirst(lc);
+
+ /* Is it computable here? */
+ if (bms_is_subset(phinfo->ph_eval_at, relids))
+ {
+ /* Is it still needed above this joinrel? */
+ if (bms_nonempty_difference(phinfo->ph_needed, relids))
+ {
+ /* Yup, add it to the output */
+ joinrel->reltarget->exprs = lappend(joinrel->reltarget->exprs,
+ phinfo->ph_var);
+ joinrel->reltarget->width += phinfo->ph_width;
+
+ /*
+ * Charge the cost of evaluating the contained expression if
+ * the PHV can be computed here but not in either input. This
+ * is a bit bogus because we make the decision based on the
+ * first pair of possible input relations considered for the
+ * joinrel. With other pairs, it might be possible to compute
+ * the PHV in one input or the other, and then we'd be double
+ * charging the PHV's cost for some join paths. For now, live
+ * with that; but we might want to improve it later by
+ * refiguring the reltarget costs for each pair of inputs.
+ */
+ if (!bms_is_subset(phinfo->ph_eval_at, outer_rel->relids) &&
+ !bms_is_subset(phinfo->ph_eval_at, inner_rel->relids))
+ {
+ QualCost cost;
+
+ cost_qual_eval_node(&cost, (Node *) phinfo->ph_var->phexpr,
+ root);
+ joinrel->reltarget->cost.startup += cost.startup;
+ joinrel->reltarget->cost.per_tuple += cost.per_tuple;
+ }
+ }
+
+ /*
+ * Also adjust joinrel's direct_lateral_relids to include the
+ * PHV's source rel(s). We must do this even if we're not
+ * actually going to emit the PHV, otherwise join_is_legal() will
+ * reject valid join orderings. (In principle maybe we could
+ * instead remove the joinrel's lateral_relids dependency; but
+ * that's complicated to get right, and cases where we're not
+ * going to emit the PHV are too rare to justify the work.)
+ *
+ * In principle we should only do this if the join doesn't yet
+ * include the PHV's source rel(s). But our caller
+ * build_join_rel() will clean things up by removing the join's
+ * own relids from its direct_lateral_relids, so we needn't
+ * account for that here.
+ */
+ joinrel->direct_lateral_relids =
+ bms_add_members(joinrel->direct_lateral_relids,
+ phinfo->ph_lateral);
+ }
+ }
+}
diff --git a/src/backend/optimizer/util/plancat.c b/src/backend/optimizer/util/plancat.c
new file mode 100644
index 0000000..419f2ac
--- /dev/null
+++ b/src/backend/optimizer/util/plancat.c
@@ -0,0 +1,2509 @@
+/*-------------------------------------------------------------------------
+ *
+ * plancat.c
+ * routines for accessing the system catalogs
+ *
+ *
+ * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ * src/backend/optimizer/util/plancat.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include <math.h>
+
+#include "access/genam.h"
+#include "access/htup_details.h"
+#include "access/nbtree.h"
+#include "access/sysattr.h"
+#include "access/table.h"
+#include "access/tableam.h"
+#include "access/transam.h"
+#include "access/xlog.h"
+#include "catalog/catalog.h"
+#include "catalog/heap.h"
+#include "catalog/pg_am.h"
+#include "catalog/pg_proc.h"
+#include "catalog/pg_statistic_ext.h"
+#include "catalog/pg_statistic_ext_data.h"
+#include "foreign/fdwapi.h"
+#include "miscadmin.h"
+#include "nodes/makefuncs.h"
+#include "nodes/nodeFuncs.h"
+#include "nodes/supportnodes.h"
+#include "optimizer/clauses.h"
+#include "optimizer/cost.h"
+#include "optimizer/optimizer.h"
+#include "optimizer/plancat.h"
+#include "optimizer/prep.h"
+#include "parser/parse_relation.h"
+#include "parser/parsetree.h"
+#include "partitioning/partdesc.h"
+#include "rewrite/rewriteManip.h"
+#include "statistics/statistics.h"
+#include "storage/bufmgr.h"
+#include "utils/builtins.h"
+#include "utils/lsyscache.h"
+#include "utils/partcache.h"
+#include "utils/rel.h"
+#include "utils/snapmgr.h"
+#include "utils/syscache.h"
+
+/* GUC parameter */
+int constraint_exclusion = CONSTRAINT_EXCLUSION_PARTITION;
+
+/* Hook for plugins to get control in get_relation_info() */
+get_relation_info_hook_type get_relation_info_hook = NULL;
+
+
+static void get_relation_foreign_keys(PlannerInfo *root, RelOptInfo *rel,
+ Relation relation, bool inhparent);
+static bool infer_collation_opclass_match(InferenceElem *elem, Relation idxRel,
+ List *idxExprs);
+static List *get_relation_constraints(PlannerInfo *root,
+ Oid relationObjectId, RelOptInfo *rel,
+ bool include_noinherit,
+ bool include_notnull,
+ bool include_partition);
+static List *build_index_tlist(PlannerInfo *root, IndexOptInfo *index,
+ Relation heapRelation);
+static List *get_relation_statistics(RelOptInfo *rel, Relation relation);
+static void set_relation_partition_info(PlannerInfo *root, RelOptInfo *rel,
+ Relation relation);
+static PartitionScheme find_partition_scheme(PlannerInfo *root, Relation rel);
+static void set_baserel_partition_key_exprs(Relation relation,
+ RelOptInfo *rel);
+static void set_baserel_partition_constraint(Relation relation,
+ RelOptInfo *rel);
+
+
+/*
+ * get_relation_info -
+ * Retrieves catalog information for a given relation.
+ *
+ * Given the Oid of the relation, return the following info into fields
+ * of the RelOptInfo struct:
+ *
+ * min_attr lowest valid AttrNumber
+ * max_attr highest valid AttrNumber
+ * indexlist list of IndexOptInfos for relation's indexes
+ * statlist list of StatisticExtInfo for relation's statistic objects
+ * serverid if it's a foreign table, the server OID
+ * fdwroutine if it's a foreign table, the FDW function pointers
+ * pages number of pages
+ * tuples number of tuples
+ * rel_parallel_workers user-defined number of parallel workers
+ *
+ * Also, add information about the relation's foreign keys to root->fkey_list.
+ *
+ * Also, initialize the attr_needed[] and attr_widths[] arrays. In most
+ * cases these are left as zeroes, but sometimes we need to compute attr
+ * widths here, and we may as well cache the results for costsize.c.
+ *
+ * If inhparent is true, all we need to do is set up the attr arrays:
+ * the RelOptInfo actually represents the appendrel formed by an inheritance
+ * tree, and so the parent rel's physical size and index information isn't
+ * important for it.
+ */
+void
+get_relation_info(PlannerInfo *root, Oid relationObjectId, bool inhparent,
+ RelOptInfo *rel)
+{
+ Index varno = rel->relid;
+ Relation relation;
+ bool hasindex;
+ List *indexinfos = NIL;
+
+ /*
+ * We need not lock the relation since it was already locked, either by
+ * the rewriter or when expand_inherited_rtentry() added it to the query's
+ * rangetable.
+ */
+ relation = table_open(relationObjectId, NoLock);
+
+ /*
+ * Relations without a table AM can be used in a query only if they are of
+ * special-cased relkinds. This check prevents us from crashing later if,
+ * for example, a view's ON SELECT rule has gone missing. Note that
+ * table_open() already rejected indexes and composite types; spell the
+ * error the same way it does.
+ */
+ if (!relation->rd_tableam)
+ {
+ if (!(relation->rd_rel->relkind == RELKIND_FOREIGN_TABLE ||
+ relation->rd_rel->relkind == RELKIND_PARTITIONED_TABLE))
+ ereport(ERROR,
+ (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+ errmsg("cannot open relation \"%s\"",
+ RelationGetRelationName(relation)),
+ errdetail_relkind_not_supported(relation->rd_rel->relkind)));
+ }
+
+ /* Temporary and unlogged relations are inaccessible during recovery. */
+ if (!RelationIsPermanent(relation) && RecoveryInProgress())
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("cannot access temporary or unlogged relations during recovery")));
+
+ rel->min_attr = FirstLowInvalidHeapAttributeNumber + 1;
+ rel->max_attr = RelationGetNumberOfAttributes(relation);
+ rel->reltablespace = RelationGetForm(relation)->reltablespace;
+
+ Assert(rel->max_attr >= rel->min_attr);
+ rel->attr_needed = (Relids *)
+ palloc0((rel->max_attr - rel->min_attr + 1) * sizeof(Relids));
+ rel->attr_widths = (int32 *)
+ palloc0((rel->max_attr - rel->min_attr + 1) * sizeof(int32));
+
+ /*
+ * Estimate relation size --- unless it's an inheritance parent, in which
+ * case the size we want is not the rel's own size but the size of its
+ * inheritance tree. That will be computed in set_append_rel_size().
+ */
+ if (!inhparent)
+ estimate_rel_size(relation, rel->attr_widths - rel->min_attr,
+ &rel->pages, &rel->tuples, &rel->allvisfrac);
+
+ /* Retrieve the parallel_workers reloption, or -1 if not set. */
+ rel->rel_parallel_workers = RelationGetParallelWorkers(relation, -1);
+
+ /*
+ * Make list of indexes. Ignore indexes on system catalogs if told to.
+ * Don't bother with indexes for an inheritance parent, either.
+ */
+ if (inhparent ||
+ (IgnoreSystemIndexes && IsSystemRelation(relation)))
+ hasindex = false;
+ else
+ hasindex = relation->rd_rel->relhasindex;
+
+ if (hasindex)
+ {
+ List *indexoidlist;
+ LOCKMODE lmode;
+ ListCell *l;
+
+ indexoidlist = RelationGetIndexList(relation);
+
+ /*
+ * For each index, we get the same type of lock that the executor will
+ * need, and do not release it. This saves a couple of trips to the
+ * shared lock manager while not creating any real loss of
+ * concurrency, because no schema changes could be happening on the
+ * index while we hold lock on the parent rel, and no lock type used
+ * for queries blocks any other kind of index operation.
+ */
+ lmode = root->simple_rte_array[varno]->rellockmode;
+
+ foreach(l, indexoidlist)
+ {
+ Oid indexoid = lfirst_oid(l);
+ Relation indexRelation;
+ Form_pg_index index;
+ IndexAmRoutine *amroutine;
+ IndexOptInfo *info;
+ int ncolumns,
+ nkeycolumns;
+ int i;
+
+ /*
+ * Extract info from the relation descriptor for the index.
+ */
+ indexRelation = index_open(indexoid, lmode);
+ index = indexRelation->rd_index;
+
+ /*
+ * Ignore invalid indexes, since they can't safely be used for
+ * queries. Note that this is OK because the data structure we
+ * are constructing is only used by the planner --- the executor
+ * still needs to insert into "invalid" indexes, if they're marked
+ * indisready.
+ */
+ if (!index->indisvalid)
+ {
+ index_close(indexRelation, NoLock);
+ continue;
+ }
+
+ /*
+ * Ignore partitioned indexes, since they are not usable for
+ * queries.
+ */
+ if (indexRelation->rd_rel->relkind == RELKIND_PARTITIONED_INDEX)
+ {
+ index_close(indexRelation, NoLock);
+ continue;
+ }
+
+ /*
+ * If the index is valid, but cannot yet be used, ignore it; but
+ * mark the plan we are generating as transient. See
+ * src/backend/access/heap/README.HOT for discussion.
+ */
+ if (index->indcheckxmin &&
+ !TransactionIdPrecedes(HeapTupleHeaderGetXmin(indexRelation->rd_indextuple->t_data),
+ TransactionXmin))
+ {
+ root->glob->transientPlan = true;
+ index_close(indexRelation, NoLock);
+ continue;
+ }
+
+ info = makeNode(IndexOptInfo);
+
+ info->indexoid = index->indexrelid;
+ info->reltablespace =
+ RelationGetForm(indexRelation)->reltablespace;
+ info->rel = rel;
+ info->ncolumns = ncolumns = index->indnatts;
+ info->nkeycolumns = nkeycolumns = index->indnkeyatts;
+
+ info->indexkeys = (int *) palloc(sizeof(int) * ncolumns);
+ info->indexcollations = (Oid *) palloc(sizeof(Oid) * nkeycolumns);
+ info->opfamily = (Oid *) palloc(sizeof(Oid) * nkeycolumns);
+ info->opcintype = (Oid *) palloc(sizeof(Oid) * nkeycolumns);
+ info->canreturn = (bool *) palloc(sizeof(bool) * ncolumns);
+
+ for (i = 0; i < ncolumns; i++)
+ {
+ info->indexkeys[i] = index->indkey.values[i];
+ info->canreturn[i] = index_can_return(indexRelation, i + 1);
+ }
+
+ for (i = 0; i < nkeycolumns; i++)
+ {
+ info->opfamily[i] = indexRelation->rd_opfamily[i];
+ info->opcintype[i] = indexRelation->rd_opcintype[i];
+ info->indexcollations[i] = indexRelation->rd_indcollation[i];
+ }
+
+ info->relam = indexRelation->rd_rel->relam;
+
+ /* We copy just the fields we need, not all of rd_indam */
+ amroutine = indexRelation->rd_indam;
+ info->amcanorderbyop = amroutine->amcanorderbyop;
+ info->amoptionalkey = amroutine->amoptionalkey;
+ info->amsearcharray = amroutine->amsearcharray;
+ info->amsearchnulls = amroutine->amsearchnulls;
+ info->amcanparallel = amroutine->amcanparallel;
+ info->amhasgettuple = (amroutine->amgettuple != NULL);
+ info->amhasgetbitmap = amroutine->amgetbitmap != NULL &&
+ relation->rd_tableam->scan_bitmap_next_block != NULL;
+ info->amcanmarkpos = (amroutine->ammarkpos != NULL &&
+ amroutine->amrestrpos != NULL);
+ info->amcostestimate = amroutine->amcostestimate;
+ Assert(info->amcostestimate != NULL);
+
+ /* Fetch index opclass options */
+ info->opclassoptions = RelationGetIndexAttOptions(indexRelation, true);
+
+ /*
+ * Fetch the ordering information for the index, if any.
+ */
+ if (info->relam == BTREE_AM_OID)
+ {
+ /*
+ * If it's a btree index, we can use its opfamily OIDs
+ * directly as the sort ordering opfamily OIDs.
+ */
+ Assert(amroutine->amcanorder);
+
+ info->sortopfamily = info->opfamily;
+ info->reverse_sort = (bool *) palloc(sizeof(bool) * nkeycolumns);
+ info->nulls_first = (bool *) palloc(sizeof(bool) * nkeycolumns);
+
+ for (i = 0; i < nkeycolumns; i++)
+ {
+ int16 opt = indexRelation->rd_indoption[i];
+
+ info->reverse_sort[i] = (opt & INDOPTION_DESC) != 0;
+ info->nulls_first[i] = (opt & INDOPTION_NULLS_FIRST) != 0;
+ }
+ }
+ else if (amroutine->amcanorder)
+ {
+ /*
+ * Otherwise, identify the corresponding btree opfamilies by
+ * trying to map this index's "<" operators into btree. Since
+ * "<" uniquely defines the behavior of a sort order, this is
+ * a sufficient test.
+ *
+ * XXX This method is rather slow and also requires the
+ * undesirable assumption that the other index AM numbers its
+ * strategies the same as btree. It'd be better to have a way
+ * to explicitly declare the corresponding btree opfamily for
+ * each opfamily of the other index type. But given the lack
+ * of current or foreseeable amcanorder index types, it's not
+ * worth expending more effort on now.
+ */
+ info->sortopfamily = (Oid *) palloc(sizeof(Oid) * nkeycolumns);
+ info->reverse_sort = (bool *) palloc(sizeof(bool) * nkeycolumns);
+ info->nulls_first = (bool *) palloc(sizeof(bool) * nkeycolumns);
+
+ for (i = 0; i < nkeycolumns; i++)
+ {
+ int16 opt = indexRelation->rd_indoption[i];
+ Oid ltopr;
+ Oid btopfamily;
+ Oid btopcintype;
+ int16 btstrategy;
+
+ info->reverse_sort[i] = (opt & INDOPTION_DESC) != 0;
+ info->nulls_first[i] = (opt & INDOPTION_NULLS_FIRST) != 0;
+
+ ltopr = get_opfamily_member(info->opfamily[i],
+ info->opcintype[i],
+ info->opcintype[i],
+ BTLessStrategyNumber);
+ if (OidIsValid(ltopr) &&
+ get_ordering_op_properties(ltopr,
+ &btopfamily,
+ &btopcintype,
+ &btstrategy) &&
+ btopcintype == info->opcintype[i] &&
+ btstrategy == BTLessStrategyNumber)
+ {
+ /* Successful mapping */
+ info->sortopfamily[i] = btopfamily;
+ }
+ else
+ {
+ /* Fail ... quietly treat index as unordered */
+ info->sortopfamily = NULL;
+ info->reverse_sort = NULL;
+ info->nulls_first = NULL;
+ break;
+ }
+ }
+ }
+ else
+ {
+ info->sortopfamily = NULL;
+ info->reverse_sort = NULL;
+ info->nulls_first = NULL;
+ }
+
+ /*
+ * Fetch the index expressions and predicate, if any. We must
+ * modify the copies we obtain from the relcache to have the
+ * correct varno for the parent relation, so that they match up
+ * correctly against qual clauses.
+ */
+ info->indexprs = RelationGetIndexExpressions(indexRelation);
+ info->indpred = RelationGetIndexPredicate(indexRelation);
+ if (info->indexprs && varno != 1)
+ ChangeVarNodes((Node *) info->indexprs, 1, varno, 0);
+ if (info->indpred && varno != 1)
+ ChangeVarNodes((Node *) info->indpred, 1, varno, 0);
+
+ /* Build targetlist using the completed indexprs data */
+ info->indextlist = build_index_tlist(root, info, relation);
+
+ info->indrestrictinfo = NIL; /* set later, in indxpath.c */
+ info->predOK = false; /* set later, in indxpath.c */
+ info->unique = index->indisunique;
+ info->immediate = index->indimmediate;
+ info->hypothetical = false;
+
+ /*
+ * Estimate the index size. If it's not a partial index, we lock
+ * the number-of-tuples estimate to equal the parent table; if it
+ * is partial then we have to use the same methods as we would for
+ * a table, except we can be sure that the index is not larger
+ * than the table.
+ */
+ if (info->indpred == NIL)
+ {
+ info->pages = RelationGetNumberOfBlocks(indexRelation);
+ info->tuples = rel->tuples;
+ }
+ else
+ {
+ double allvisfrac; /* dummy */
+
+ estimate_rel_size(indexRelation, NULL,
+ &info->pages, &info->tuples, &allvisfrac);
+ if (info->tuples > rel->tuples)
+ info->tuples = rel->tuples;
+ }
+
+ if (info->relam == BTREE_AM_OID)
+ {
+ /* For btrees, get tree height while we have the index open */
+ info->tree_height = _bt_getrootheight(indexRelation);
+ }
+ else
+ {
+ /* For other index types, just set it to "unknown" for now */
+ info->tree_height = -1;
+ }
+
+ index_close(indexRelation, NoLock);
+
+ /*
+ * We've historically used lcons() here. It'd make more sense to
+ * use lappend(), but that causes the planner to change behavior
+ * in cases where two indexes seem equally attractive. For now,
+ * stick with lcons() --- few tables should have so many indexes
+ * that the O(N^2) behavior of lcons() is really a problem.
+ */
+ indexinfos = lcons(info, indexinfos);
+ }
+
+ list_free(indexoidlist);
+ }
+
+ rel->indexlist = indexinfos;
+
+ rel->statlist = get_relation_statistics(rel, relation);
+
+ /* Grab foreign-table info using the relcache, while we have it */
+ if (relation->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
+ {
+ rel->serverid = GetForeignServerIdByRelId(RelationGetRelid(relation));
+ rel->fdwroutine = GetFdwRoutineForRelation(relation, true);
+ }
+ else
+ {
+ rel->serverid = InvalidOid;
+ rel->fdwroutine = NULL;
+ }
+
+ /* Collect info about relation's foreign keys, if relevant */
+ get_relation_foreign_keys(root, rel, relation, inhparent);
+
+ /* Collect info about functions implemented by the rel's table AM. */
+ if (relation->rd_tableam &&
+ relation->rd_tableam->scan_set_tidrange != NULL &&
+ relation->rd_tableam->scan_getnextslot_tidrange != NULL)
+ rel->amflags |= AMFLAG_HAS_TID_RANGE;
+
+ /*
+ * Collect info about relation's partitioning scheme, if any. Only
+ * inheritance parents may be partitioned.
+ */
+ if (inhparent && relation->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
+ set_relation_partition_info(root, rel, relation);
+
+ table_close(relation, NoLock);
+
+ /*
+ * Allow a plugin to editorialize on the info we obtained from the
+ * catalogs. Actions might include altering the assumed relation size,
+ * removing an index, or adding a hypothetical index to the indexlist.
+ */
+ if (get_relation_info_hook)
+ (*get_relation_info_hook) (root, relationObjectId, inhparent, rel);
+}
+
+/*
+ * get_relation_foreign_keys -
+ * Retrieves foreign key information for a given relation.
+ *
+ * ForeignKeyOptInfos for relevant foreign keys are created and added to
+ * root->fkey_list. We do this now while we have the relcache entry open.
+ * We could sometimes avoid making useless ForeignKeyOptInfos if we waited
+ * until all RelOptInfos have been built, but the cost of re-opening the
+ * relcache entries would probably exceed any savings.
+ */
+static void
+get_relation_foreign_keys(PlannerInfo *root, RelOptInfo *rel,
+ Relation relation, bool inhparent)
+{
+ List *rtable = root->parse->rtable;
+ List *cachedfkeys;
+ ListCell *lc;
+
+ /*
+ * If it's not a baserel, we don't care about its FKs. Also, if the query
+ * references only a single relation, we can skip the lookup since no FKs
+ * could satisfy the requirements below.
+ */
+ if (rel->reloptkind != RELOPT_BASEREL ||
+ list_length(rtable) < 2)
+ return;
+
+ /*
+ * If it's the parent of an inheritance tree, ignore its FKs. We could
+ * make useful FK-based deductions if we found that all members of the
+ * inheritance tree have equivalent FK constraints, but detecting that
+ * would require code that hasn't been written.
+ */
+ if (inhparent)
+ return;
+
+ /*
+ * Extract data about relation's FKs from the relcache. Note that this
+ * list belongs to the relcache and might disappear in a cache flush, so
+ * we must not do any further catalog access within this function.
+ */
+ cachedfkeys = RelationGetFKeyList(relation);
+
+ /*
+ * Figure out which FKs are of interest for this query, and create
+ * ForeignKeyOptInfos for them. We want only FKs that reference some
+ * other RTE of the current query. In queries containing self-joins,
+ * there might be more than one other RTE for a referenced table, and we
+ * should make a ForeignKeyOptInfo for each occurrence.
+ *
+ * Ideally, we would ignore RTEs that correspond to non-baserels, but it's
+ * too hard to identify those here, so we might end up making some useless
+ * ForeignKeyOptInfos. If so, match_foreign_keys_to_quals() will remove
+ * them again.
+ */
+ foreach(lc, cachedfkeys)
+ {
+ ForeignKeyCacheInfo *cachedfk = (ForeignKeyCacheInfo *) lfirst(lc);
+ Index rti;
+ ListCell *lc2;
+
+ /* conrelid should always be that of the table we're considering */
+ Assert(cachedfk->conrelid == RelationGetRelid(relation));
+
+ /* Scan to find other RTEs matching confrelid */
+ rti = 0;
+ foreach(lc2, rtable)
+ {
+ RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc2);
+ ForeignKeyOptInfo *info;
+
+ rti++;
+ /* Ignore if not the correct table */
+ if (rte->rtekind != RTE_RELATION ||
+ rte->relid != cachedfk->confrelid)
+ continue;
+ /* Ignore if it's an inheritance parent; doesn't really match */
+ if (rte->inh)
+ continue;
+ /* Ignore self-referential FKs; we only care about joins */
+ if (rti == rel->relid)
+ continue;
+
+ /* OK, let's make an entry */
+ info = makeNode(ForeignKeyOptInfo);
+ info->con_relid = rel->relid;
+ info->ref_relid = rti;
+ info->nkeys = cachedfk->nkeys;
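+ /* copy the referencing/referenced column numbers and the PK = FK operator OIDs */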
+ memcpy(info->conkey, cachedfk->conkey, sizeof(info->conkey));
+ memcpy(info->confkey, cachedfk->confkey, sizeof(info->confkey));
+ memcpy(info->conpfeqop, cachedfk->conpfeqop, sizeof(info->conpfeqop));
+ /* zero out fields to be filled by match_foreign_keys_to_quals */
+ info->nmatched_ec = 0;
+ info->nconst_ec = 0;
+ info->nmatched_rcols = 0;
+ info->nmatched_ri = 0;
+ memset(info->eclass, 0, sizeof(info->eclass));
+ memset(info->fk_eclass_member, 0, sizeof(info->fk_eclass_member));
+ memset(info->rinfos, 0, sizeof(info->rinfos));
+
+ root->fkey_list = lappend(root->fkey_list, info);
+ }
+ }
+}
+
+/*
+ * infer_arbiter_indexes -
+ * Determine the unique indexes used to arbitrate speculative insertion.
+ *
+ * Uses user-supplied inference clause expressions and predicate to match a
+ * unique index from those defined and ready on the heap relation (target).
+ * An exact match is required on columns/expressions (although they can appear
+ * in any order). However, the predicate given by the user need only restrict
+ * insertion to a subset of the rows covered by some particular unique index
+ * (in particular, a partial unique index) in order for that index to be
+ * inferred.
+ *
+ * The implementation does not consider which B-Tree operator class any
+ * particular available unique index attribute uses, unless one was specified
+ * in the inference specification. The same is true of collations. In
+ * particular, there is no system dependency on the default operator class for
+ * the purposes of inference. If no opclass (or collation) is specified, then
+ * all matching indexes (that may or may not match the default in terms of
+ * each attribute opclass/collation) are used for inference.
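+ *
+ * Illustrative example (editorial note; the table and column names are
+ * hypothetical): given "CREATE UNIQUE INDEX ON t (lower(email)) WHERE NOT
+ * deleted", a clause "ON CONFLICT (lower(email)) WHERE NOT deleted DO
+ * UPDATE ..." infers that index, since the inference element matches the
+ * index expression exactly and the user's WHERE clause implies the partial
+ * index predicate.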
+ */
+List *
+infer_arbiter_indexes(PlannerInfo *root)
+{
+ OnConflictExpr *onconflict = root->parse->onConflict;
+
+ /* Iteration state */
+ RangeTblEntry *rte;
+ Relation relation;
+ Oid indexOidFromConstraint = InvalidOid;
+ List *indexList;
+ ListCell *l;
+
+ /* Normalized inference attributes and inference expressions: */
+ Bitmapset *inferAttrs = NULL;
+ List *inferElems = NIL;
+
+ /* Results */
+ List *results = NIL;
+
+ /*
+ * Quickly return NIL for ON CONFLICT DO NOTHING without an inference
+ * specification or named constraint. ON CONFLICT DO UPDATE statements
+ * must always provide one or the other (but parser ought to have caught
+ * that already).
+ */
+ if (onconflict->arbiterElems == NIL &&
+ onconflict->constraint == InvalidOid)
+ return NIL;
+
+ /*
+ * We need not lock the relation since it was already locked, either by
+ * the rewriter or when expand_inherited_rtentry() added it to the query's
+ * rangetable.
+ */
+ rte = rt_fetch(root->parse->resultRelation, root->parse->rtable);
+
+ relation = table_open(rte->relid, NoLock);
+
+ /*
+ * Build normalized/BMS representation of plain indexed attributes, as
+ * well as a separate list of expression items. This simplifies matching
+ * the cataloged definition of indexes.
+ */
+ foreach(l, onconflict->arbiterElems)
+ {
+ InferenceElem *elem = (InferenceElem *) lfirst(l);
+ Var *var;
+ int attno;
+
+ if (!IsA(elem->expr, Var))
+ {
+ /* If not a plain Var, just shove it in inferElems for now */
+ inferElems = lappend(inferElems, elem->expr);
+ continue;
+ }
+
+ var = (Var *) elem->expr;
+ attno = var->varattno;
+
+ if (attno == 0)
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("whole row unique index inference specifications are not supported")));
+
+ inferAttrs = bms_add_member(inferAttrs,
+ attno - FirstLowInvalidHeapAttributeNumber);
+ }
+
+ /*
+ * Lookup named constraint's index. This is not immediately returned
+ * because some additional sanity checks are required.
+ */
+ if (onconflict->constraint != InvalidOid)
+ {
+ indexOidFromConstraint = get_constraint_index(onconflict->constraint);
+
+ if (indexOidFromConstraint == InvalidOid)
+ ereport(ERROR,
+ (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+ errmsg("constraint in ON CONFLICT clause has no associated index")));
+ }
+
+ /*
+ * Using that representation, iterate through the list of indexes on the
+ * target relation to try and find a match
+ */
+ indexList = RelationGetIndexList(relation);
+
+ foreach(l, indexList)
+ {
+ Oid indexoid = lfirst_oid(l);
+ Relation idxRel;
+ Form_pg_index idxForm;
+ Bitmapset *indexedAttrs;
+ List *idxExprs;
+ List *predExprs;
+ AttrNumber natt;
+ ListCell *el;
+
+ /*
+ * Extract info from the relation descriptor for the index. Obtain
+ * the same lock type that the executor will ultimately use.
+ *
+ * Let executor complain about !indimmediate case directly, because
+ * enforcement needs to occur there anyway when an inference clause is
+ * omitted.
+ */
+ idxRel = index_open(indexoid, rte->rellockmode);
+ idxForm = idxRel->rd_index;
+
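+ /* Skip indexes that aren't valid yet (e.g. a CREATE INDEX CONCURRENTLY that is still in progress or has failed) */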
+ if (!idxForm->indisvalid)
+ goto next;
+
+ /*
+ * Note that we do not perform a check against indcheckxmin (like e.g.
+ * get_relation_info()) here to eliminate candidates, because
+ * uniqueness checking only cares about the most recently committed
+ * tuple versions.
+ */
+
+ /*
+ * Look for a match on the "ON constraint_name" variant, which need not be
+ * a unique constraint. This can only be a constraint name.
+ */
+ if (indexOidFromConstraint == idxForm->indexrelid)
+ {
+ if (!idxForm->indisunique && onconflict->action == ONCONFLICT_UPDATE)
+ ereport(ERROR,
+ (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+ errmsg("ON CONFLICT DO UPDATE not supported with exclusion constraints")));
+
+ results = lappend_oid(results, idxForm->indexrelid);
+ list_free(indexList);
+ index_close(idxRel, NoLock);
+ table_close(relation, NoLock);
+ return results;
+ }
+ else if (indexOidFromConstraint != InvalidOid)
+ {
+ /* No point in further work for index in named constraint case */
+ goto next;
+ }
+
+ /*
+ * Only considering conventional inference at this point (not named
+ * constraints), so index under consideration can be immediately
+ * skipped if it's not unique
+ */
+ if (!idxForm->indisunique)
+ goto next;
+
+ /* Build BMS representation of plain (non expression) index attrs */
+ indexedAttrs = NULL;
+ for (natt = 0; natt < idxForm->indnkeyatts; natt++)
+ {
+ int attno = idxRel->rd_index->indkey.values[natt];
+
+ if (attno != 0)
+ indexedAttrs = bms_add_member(indexedAttrs,
+ attno - FirstLowInvalidHeapAttributeNumber);
+ }
+
+ /* Non-expression attributes (if any) must match */
+ if (!bms_equal(indexedAttrs, inferAttrs))
+ goto next;
+
+ /* Expression attributes (if any) must match */
+ idxExprs = RelationGetIndexExpressions(idxRel);
+ foreach(el, onconflict->arbiterElems)
+ {
+ InferenceElem *elem = (InferenceElem *) lfirst(el);
+
+ /*
+ * Ensure that collation/opclass aspects of inference expression
+ * element match. Even though this loop is primarily concerned
+ * with matching expressions, it is a convenient point to check
+ * this for both expressions and ordinary (non-expression)
+ * attributes appearing as inference elements.
+ */
+ if (!infer_collation_opclass_match(elem, idxRel, idxExprs))
+ goto next;
+
+ /*
+ * Plain Vars don't factor into count of expression elements, and
+ * the question of whether or not they satisfy the index
+ * definition has already been considered (they must).
+ */
+ if (IsA(elem->expr, Var))
+ continue;
+
+ /*
+ * Might as well avoid a redundant check in the rare cases where
+ * infer_collation_opclass_match() is required to do real work (it has
+ * already verified the match). Otherwise, check that the element
+ * expression appears in the cataloged index definition.
+ */
+ if (elem->infercollid != InvalidOid ||
+ elem->inferopclass != InvalidOid ||
+ list_member(idxExprs, elem->expr))
+ continue;
+
+ goto next;
+ }
+
+ /*
+ * Now that all inference elements were matched, ensure that the
+ * expression elements from inference clause are not missing any
+ * cataloged expressions. This does the right thing when unique
+ * indexes redundantly repeat the same attribute, or if attributes
+ * redundantly appear multiple times within an inference clause.
+ */
+ if (list_difference(idxExprs, inferElems) != NIL)
+ goto next;
+
+ /*
+ * If it's a partial index, its predicate must be implied by the ON
+ * CONFLICT's WHERE clause.
+ */
+ predExprs = RelationGetIndexPredicate(idxRel);
+
+ if (!predicate_implied_by(predExprs, (List *) onconflict->arbiterWhere, false))
+ goto next;
+
+ results = lappend_oid(results, idxForm->indexrelid);
+next:
+ index_close(idxRel, NoLock);
+ }
+
+ list_free(indexList);
+ table_close(relation, NoLock);
+
+ if (results == NIL)
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
+ errmsg("there is no unique or exclusion constraint matching the ON CONFLICT specification")));
+
+ return results;
+}
+
+/*
+ * infer_collation_opclass_match - ensure infer element opclass/collation match
+ *
+ * Given a unique index inference element from the inference specification, if
+ * a collation or an opclass was specified, verify that there is at least one
+ * matching indexed attribute (occasionally, there may be more). Skip this in
+ * the common case where the inference specification does not include a
+ * collation or opclass (instead matching everything, regardless of cataloged
+ * collation/opclass of indexed attribute).
+ *
+ * At least historically, Postgres has not offered collations or opclasses
+ * with alternative-to-default notions of equality, so these additional
+ * criteria should only be required infrequently.
+ *
+ * Don't give up immediately when an inference element matches some attribute
+ * cataloged as indexed but not matching additional opclass/collation
+ * criteria. This is done so that the implementation is as forgiving as
+ * possible of redundancy within cataloged index attributes (or, less
+ * usefully, within inference specification elements). If collations actually
+ * differ between apparently redundantly indexed attributes (redundant within
+ * or across indexes), then there really is no redundancy as such.
+ *
+ * Note that if an inference element specifies an opclass and a collation at
+ * once, both must match in at least one particular attribute within index
+ * catalog definition in order for that inference element to be considered
+ * inferred/satisfied.
+ */
+static bool
+infer_collation_opclass_match(InferenceElem *elem, Relation idxRel,
+ List *idxExprs)
+{
+ AttrNumber natt;
+ Oid inferopfamily = InvalidOid; /* OID of opclass opfamily */
+ Oid inferopcinputtype = InvalidOid; /* OID of opclass input type */
+ int nplain = 0; /* # plain attrs observed */
+
+ /*
+ * If inference specification element lacks collation/opclass, then no
+ * need to check for exact match.
+ */
+ if (elem->infercollid == InvalidOid && elem->inferopclass == InvalidOid)
+ return true;
+
+ /*
+ * Lookup opfamily and input type, for matching indexes
+ */
+ if (elem->inferopclass)
+ {
+ inferopfamily = get_opclass_family(elem->inferopclass);
+ inferopcinputtype = get_opclass_input_type(elem->inferopclass);
+ }
+
+ for (natt = 1; natt <= idxRel->rd_att->natts; natt++)
+ {
+ Oid opfamily = idxRel->rd_opfamily[natt - 1];
+ Oid opcinputtype = idxRel->rd_opcintype[natt - 1];
+ Oid collation = idxRel->rd_indcollation[natt - 1];
+ int attno = idxRel->rd_index->indkey.values[natt - 1];
+
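+ /* count plain (non-expression) columns seen so far; used below to index into idxExprs */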
+ if (attno != 0)
+ nplain++;
+
+ if (elem->inferopclass != InvalidOid &&
+ (inferopfamily != opfamily || inferopcinputtype != opcinputtype))
+ {
+ /* Attribute needed to match opclass, but didn't */
+ continue;
+ }
+
+ if (elem->infercollid != InvalidOid &&
+ elem->infercollid != collation)
+ {
+ /* Attribute needed to match collation, but didn't */
+ continue;
+ }
+
+ /* If one matching index att found, good enough -- return true */
+ if (IsA(elem->expr, Var))
+ {
+ if (((Var *) elem->expr)->varattno == attno)
+ return true;
+ }
+ else if (attno == 0)
+ {
+ Node *nattExpr = list_nth(idxExprs, (natt - 1) - nplain);
+
+ /*
+ * Note that unlike routines like match_index_to_operand() we
+ * don't need to care about RelabelType. Neither the index
+ * definition nor the inference clause should contain them.
+ */
+ if (equal(elem->expr, nattExpr))
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/*
+ * estimate_rel_size - estimate # pages and # tuples in a table or index
+ *
+ * We also estimate the fraction of the pages that are marked all-visible in
+ * the visibility map, for use in estimation of index-only scans.
+ *
+ * If attr_widths isn't NULL, it points to the zero-index entry of the
+ * relation's attr_widths[] cache; we fill this in if we have need to compute
+ * the attribute widths for estimation purposes.
+ */
+void
+estimate_rel_size(Relation rel, int32 *attr_widths,
+ BlockNumber *pages, double *tuples, double *allvisfrac)
+{
+ BlockNumber curpages;
+ BlockNumber relpages;
+ double reltuples;
+ BlockNumber relallvisible;
+ double density;
+
+ if (RELKIND_HAS_TABLE_AM(rel->rd_rel->relkind))
+ {
+ table_relation_estimate_size(rel, attr_widths, pages, tuples,
+ allvisfrac);
+ }
+ else if (rel->rd_rel->relkind == RELKIND_INDEX)
+ {
+ /*
+ * XXX: It'd probably be good to move this into a callback; individual
+ * index types know, for example, whether they have a metapage.
+ */
+
+ /* it has storage, ok to call the smgr */
+ curpages = RelationGetNumberOfBlocks(rel);
+
+ /* report estimated # pages */
+ *pages = curpages;
+ /* quick exit if rel is clearly empty */
+ if (curpages == 0)
+ {
+ *tuples = 0;
+ *allvisfrac = 0;
+ return;
+ }
+
+ /* coerce values in pg_class to more desirable types */
+ relpages = (BlockNumber) rel->rd_rel->relpages;
+ reltuples = (double) rel->rd_rel->reltuples;
+ relallvisible = (BlockNumber) rel->rd_rel->relallvisible;
+
+ /*
+ * Discount the metapage while estimating the number of tuples. This
+ * is a kluge because it assumes more than it ought to about index
+ * structure. Currently it's OK for btree, hash, and GIN indexes but
+ * suspect for GiST indexes.
+ */
+ if (relpages > 0)
+ {
+ curpages--;
+ relpages--;
+ }
+
+ /* estimate number of tuples from previous tuple density */
+ if (reltuples >= 0 && relpages > 0)
+ density = reltuples / (double) relpages;
+ else
+ {
+ /*
+ * If we have no data because the relation was never vacuumed,
+ * estimate tuple width from attribute datatypes. We assume here
+ * that the pages are completely full, which is OK for tables
+ * (since they've presumably not been VACUUMed yet) but is
+ * probably an overestimate for indexes. Fortunately
+ * get_relation_info() can clamp the overestimate to the parent
+ * table's size.
+ *
+ * Note: this code intentionally disregards alignment
+ * considerations, because (a) that would be gilding the lily
+ * considering how crude the estimate is, and (b) it creates
+ * platform dependencies in the default plans which are kind of a
+ * headache for regression testing.
+ *
+ * XXX: Should this logic be more index specific?
+ */
+ int32 tuple_width;
+
+ tuple_width = get_rel_data_width(rel, attr_widths);
+ tuple_width += MAXALIGN(SizeofHeapTupleHeader);
+ tuple_width += sizeof(ItemIdData);
+ /* note: integer division is intentional here */
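+ /* (e.g., with the default 8192-byte BLCKSZ and a 40-byte estimated tuple width, this gives roughly 200 tuples per page) */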
+ density = (BLCKSZ - SizeOfPageHeaderData) / tuple_width;
+ }
+ *tuples = rint(density * (double) curpages);
+
+ /*
+ * We use relallvisible as-is, rather than scaling it up like we do
+ * for the pages and tuples counts, on the theory that any pages added
+ * since the last VACUUM are most likely not marked all-visible. But
+ * costsize.c wants it converted to a fraction.
+ */
+ if (relallvisible == 0 || curpages <= 0)
+ *allvisfrac = 0;
+ else if ((double) relallvisible >= curpages)
+ *allvisfrac = 1;
+ else
+ *allvisfrac = (double) relallvisible / curpages;
+ }
+ else
+ {
+ /*
+ * Just use whatever's in pg_class. This covers foreign tables,
+ * sequences, and also relkinds without storage (shouldn't get here?);
+ * see initializations in AddNewRelationTuple(). Note that an FDW must
+ * cope if reltuples is -1!
+ */
+ *pages = rel->rd_rel->relpages;
+ *tuples = rel->rd_rel->reltuples;
+ *allvisfrac = 0;
+ }
+}
+
+
+/*
+ * get_rel_data_width
+ *
+ * Estimate the average width of (the data part of) the relation's tuples.
+ *
+ * If attr_widths isn't NULL, it points to the zero-index entry of the
+ * relation's attr_widths[] cache; use and update that cache as appropriate.
+ *
+ * Currently we ignore dropped columns. Ideally those should be included
+ * in the result, but we haven't got any way to get info about them; and
+ * since they might be mostly NULLs, treating them as zero-width is not
+ * necessarily the wrong thing anyway.
+ */
+int32
+get_rel_data_width(Relation rel, int32 *attr_widths)
+{
+ int32 tuple_width = 0;
+ int i;
+
+ for (i = 1; i <= RelationGetNumberOfAttributes(rel); i++)
+ {
+ Form_pg_attribute att = TupleDescAttr(rel->rd_att, i - 1);
+ int32 item_width;
+
+ if (att->attisdropped)
+ continue;
+
+ /* use previously cached data, if any */
+ if (attr_widths != NULL && attr_widths[i] > 0)
+ {
+ tuple_width += attr_widths[i];
+ continue;
+ }
+
+ /* This should match set_rel_width() in costsize.c */
+ item_width = get_attavgwidth(RelationGetRelid(rel), i);
+ if (item_width <= 0)
+ {
+ item_width = get_typavgwidth(att->atttypid, att->atttypmod);
+ Assert(item_width > 0);
+ }
+ if (attr_widths != NULL)
+ attr_widths[i] = item_width;
+ tuple_width += item_width;
+ }
+
+ return tuple_width;
+}
+
+/*
+ * get_relation_data_width
+ *
+ * External API for get_rel_data_width: same behavior except we have to
+ * open the relcache entry.
+ */
+int32
+get_relation_data_width(Oid relid, int32 *attr_widths)
+{
+ int32 result;
+ Relation relation;
+
+ /* As above, assume relation is already locked */
+ relation = table_open(relid, NoLock);
+
+ result = get_rel_data_width(relation, attr_widths);
+
+ table_close(relation, NoLock);
+
+ return result;
+}
+
+
+/*
+ * get_relation_constraints
+ *
+ * Retrieve the applicable constraint expressions of the given relation.
+ *
+ * Returns a List (possibly empty) of constraint expressions. Each one
+ * has been canonicalized, and its Vars are changed to have the varno
+ * indicated by rel->relid. This allows the expressions to be easily
+ * compared to expressions taken from WHERE.
+ *
+ * If include_noinherit is true, it's okay to include constraints that
+ * are marked NO INHERIT.
+ *
+ * If include_notnull is true, "col IS NOT NULL" expressions are generated
+ * and added to the result for each column that's marked attnotnull.
+ *
+ * If include_partition is true, and the relation is a partition,
+ * also include the partitioning constraints.
+ *
+ * Note: at present this is invoked at most once per relation per planner
+ * run, and in many cases it won't be invoked at all, so there seems no
+ * point in caching the data in RelOptInfo.
+ */
+static List *
+get_relation_constraints(PlannerInfo *root,
+ Oid relationObjectId, RelOptInfo *rel,
+ bool include_noinherit,
+ bool include_notnull,
+ bool include_partition)
+{
+ List *result = NIL;
+ Index varno = rel->relid;
+ Relation relation;
+ TupleConstr *constr;
+
+ /*
+ * We assume the relation has already been safely locked.
+ */
+ relation = table_open(relationObjectId, NoLock);
+
+ constr = relation->rd_att->constr;
+ if (constr != NULL)
+ {
+ int num_check = constr->num_check;
+ int i;
+
+ for (i = 0; i < num_check; i++)
+ {
+ Node *cexpr;
+
+ /*
+ * If this constraint hasn't been fully validated yet, we must
+ * ignore it here. Also ignore if NO INHERIT and we weren't told
+ * that that's safe.
+ */
+ if (!constr->check[i].ccvalid)
+ continue;
+ if (constr->check[i].ccnoinherit && !include_noinherit)
+ continue;
+
+ cexpr = stringToNode(constr->check[i].ccbin);
+
+ /*
+ * Run each expression through const-simplification and
+ * canonicalization. This is not just an optimization, but is
+ * necessary, because we will be comparing it to
+ * similarly-processed qual clauses, and may fail to detect valid
+ * matches without this. This must match the processing done to
+ * qual clauses in preprocess_expression()! (We can skip the
+ * stuff involving subqueries, however, since we don't allow any
+ * in check constraints.)
+ */
+ cexpr = eval_const_expressions(root, cexpr);
+
+ cexpr = (Node *) canonicalize_qual((Expr *) cexpr, true);
+
+ /* Fix Vars to have the desired varno */
+ if (varno != 1)
+ ChangeVarNodes(cexpr, 1, varno, 0);
+
+ /*
+ * Finally, convert to implicit-AND format (that is, a List) and
+ * append the resulting item(s) to our output list.
+ */
+ result = list_concat(result,
+ make_ands_implicit((Expr *) cexpr));
+ }
+
+ /* Add NOT NULL constraints in expression form, if requested */
+ if (include_notnull && constr->has_not_null)
+ {
+ int natts = relation->rd_att->natts;
+
+ for (i = 1; i <= natts; i++)
+ {
+ Form_pg_attribute att = TupleDescAttr(relation->rd_att, i - 1);
+
+ if (att->attnotnull && !att->attisdropped)
+ {
+ NullTest *ntest = makeNode(NullTest);
+
+ ntest->arg = (Expr *) makeVar(varno,
+ i,
+ att->atttypid,
+ att->atttypmod,
+ att->attcollation,
+ 0);
+ ntest->nulltesttype = IS_NOT_NULL;
+
+ /*
+ * argisrow=false is correct even for a composite column,
+ * because attnotnull does not represent a SQL-spec IS NOT
+ * NULL test in such a case, just IS DISTINCT FROM NULL.
+ */
+ ntest->argisrow = false;
+ ntest->location = -1;
+ result = lappend(result, ntest);
+ }
+ }
+ }
+ }
+
+ /*
+ * Add partitioning constraints, if requested.
+ */
+ if (include_partition && relation->rd_rel->relispartition)
+ {
+ /* make sure rel->partition_qual is set */
+ set_baserel_partition_constraint(relation, rel);
+ result = list_concat(result, rel->partition_qual);
+ }
+
+ table_close(relation, NoLock);
+
+ return result;
+}
+
+/*
+ * Try loading data for the statistics object.
+ *
+ * We don't know whether data (identified by statOid and the inh value)
+ * exist; if they do, a StatisticExtInfo is appended to the stainfos list
+ * for each statistics kind that has actually been built.
+ */
+static void
+get_relation_statistics_worker(List **stainfos, RelOptInfo *rel,
+ Oid statOid, bool inh,
+ Bitmapset *keys, List *exprs)
+{
+ Form_pg_statistic_ext_data dataForm;
+ HeapTuple dtup;
+
+ dtup = SearchSysCache2(STATEXTDATASTXOID,
+ ObjectIdGetDatum(statOid), BoolGetDatum(inh));
+ if (!HeapTupleIsValid(dtup))
+ return;
+
+ dataForm = (Form_pg_statistic_ext_data) GETSTRUCT(dtup);
+
+ /* add one StatisticExtInfo for each kind built */
+ if (statext_is_kind_built(dtup, STATS_EXT_NDISTINCT))
+ {
+ StatisticExtInfo *info = makeNode(StatisticExtInfo);
+
+ info->statOid = statOid;
+ info->inherit = dataForm->stxdinherit;
+ info->rel = rel;
+ info->kind = STATS_EXT_NDISTINCT;
+ info->keys = bms_copy(keys);
+ info->exprs = exprs;
+
+ *stainfos = lappend(*stainfos, info);
+ }
+
+ if (statext_is_kind_built(dtup, STATS_EXT_DEPENDENCIES))
+ {
+ StatisticExtInfo *info = makeNode(StatisticExtInfo);
+
+ info->statOid = statOid;
+ info->inherit = dataForm->stxdinherit;
+ info->rel = rel;
+ info->kind = STATS_EXT_DEPENDENCIES;
+ info->keys = bms_copy(keys);
+ info->exprs = exprs;
+
+ *stainfos = lappend(*stainfos, info);
+ }
+
+ if (statext_is_kind_built(dtup, STATS_EXT_MCV))
+ {
+ StatisticExtInfo *info = makeNode(StatisticExtInfo);
+
+ info->statOid = statOid;
+ info->inherit = dataForm->stxdinherit;
+ info->rel = rel;
+ info->kind = STATS_EXT_MCV;
+ info->keys = bms_copy(keys);
+ info->exprs = exprs;
+
+ *stainfos = lappend(*stainfos, info);
+ }
+
+ if (statext_is_kind_built(dtup, STATS_EXT_EXPRESSIONS))
+ {
+ StatisticExtInfo *info = makeNode(StatisticExtInfo);
+
+ info->statOid = statOid;
+ info->inherit = dataForm->stxdinherit;
+ info->rel = rel;
+ info->kind = STATS_EXT_EXPRESSIONS;
+ info->keys = bms_copy(keys);
+ info->exprs = exprs;
+
+ *stainfos = lappend(*stainfos, info);
+ }
+
+ ReleaseSysCache(dtup);
+}
+
+/*
+ * get_relation_statistics
+ * Retrieve extended statistics defined on the table.
+ *
+ * Returns a List (possibly empty) of StatisticExtInfo objects describing
+ * the statistics. Note that this doesn't load the actual statistics data,
+ * just the identifying metadata. Only stats actually built are considered.
+ */
+static List *
+get_relation_statistics(RelOptInfo *rel, Relation relation)
+{
+ Index varno = rel->relid;
+ List *statoidlist;
+ List *stainfos = NIL;
+ ListCell *l;
+
+ statoidlist = RelationGetStatExtList(relation);
+
+ foreach(l, statoidlist)
+ {
+ Oid statOid = lfirst_oid(l);
+ Form_pg_statistic_ext staForm;
+ HeapTuple htup;
+ Bitmapset *keys = NULL;
+ List *exprs = NIL;
+ int i;
+
+ htup = SearchSysCache1(STATEXTOID, ObjectIdGetDatum(statOid));
+ if (!HeapTupleIsValid(htup))
+ elog(ERROR, "cache lookup failed for statistics object %u", statOid);
+ staForm = (Form_pg_statistic_ext) GETSTRUCT(htup);
+
+ /*
+ * First, build the array of columns covered. This is ultimately
+ * wasted if no stats within the object have actually been built, but
+ * it doesn't seem worth troubling over that case.
+ */
+ for (i = 0; i < staForm->stxkeys.dim1; i++)
+ keys = bms_add_member(keys, staForm->stxkeys.values[i]);
+
+ /*
+ * Preprocess expressions (if any). We read the expressions, run them
+ * through eval_const_expressions, and fix the varnos.
+ *
+ * XXX We don't yet know whether any data exist for this stats object,
+ * for either stxdinherit value. But it's reasonable to assume that at
+ * least one (possibly both) exists, so it's better to process the keys
+ * and expressions here.
+ */
+ {
+ bool isnull;
+ Datum datum;
+
+ /* decode expression (if any) */
+ datum = SysCacheGetAttr(STATEXTOID, htup,
+ Anum_pg_statistic_ext_stxexprs, &isnull);
+
+ if (!isnull)
+ {
+ char *exprsString;
+
+ exprsString = TextDatumGetCString(datum);
+ exprs = (List *) stringToNode(exprsString);
+ pfree(exprsString);
+
+ /*
+ * Run the expressions through eval_const_expressions. This is
+ * not just an optimization, but is necessary, because the
+ * planner will be comparing them to similarly-processed qual
+ * clauses, and may fail to detect valid matches without this.
+ * We must not use canonicalize_qual, however, since these
+ * aren't qual expressions.
+ */
+ exprs = (List *) eval_const_expressions(NULL, (Node *) exprs);
+
+ /* May as well fix opfuncids too */
+ fix_opfuncids((Node *) exprs);
+
+ /*
+ * Modify the copies we obtain from the relcache to have the
+ * correct varno for the parent relation, so that they match
+ * up correctly against qual clauses.
+ */
+ if (varno != 1)
+ ChangeVarNodes((Node *) exprs, 1, varno, 0);
+ }
+ }
+
+ /* extract statistics for possible values of stxdinherit flag */
+
+ get_relation_statistics_worker(&stainfos, rel, statOid, true, keys, exprs);
+
+ get_relation_statistics_worker(&stainfos, rel, statOid, false, keys, exprs);
+
+ ReleaseSysCache(htup);
+ bms_free(keys);
+ }
+
+ list_free(statoidlist);
+
+ return stainfos;
+}
+
+/*
+ * relation_excluded_by_constraints
+ *
+ * Detect whether the relation need not be scanned because it has either
+ * self-inconsistent restrictions, or restrictions inconsistent with the
+ * relation's applicable constraints.
+ *
+ * Note: this examines only rel->relid, rel->reloptkind, and
+ * rel->baserestrictinfo; therefore it can be called before filling in
+ * other fields of the RelOptInfo.
+ */
+bool
+relation_excluded_by_constraints(PlannerInfo *root,
+ RelOptInfo *rel, RangeTblEntry *rte)
+{
+ bool include_noinherit;
+ bool include_notnull;
+ bool include_partition = false;
+ List *safe_restrictions;
+ List *constraint_pred;
+ List *safe_constraints;
+ ListCell *lc;
+
+ /* As of now, constraint exclusion works only with simple relations. */
+ Assert(IS_SIMPLE_REL(rel));
+
+ /*
+ * If there are no base restriction clauses, we have no hope of proving
+ * anything below, so fall out quickly.
+ */
+ if (rel->baserestrictinfo == NIL)
+ return false;
+
+ /*
+ * Regardless of the setting of constraint_exclusion, detect
+ * constant-FALSE-or-NULL restriction clauses. Because const-folding will
+ * reduce "anything AND FALSE" to just "FALSE", any such case should
+ * result in exactly one baserestrictinfo entry. This doesn't fire very
+ * often, but it seems cheap enough to be worth doing anyway. (Without
+ * this, we'd miss some optimizations that 9.5 and earlier found via much
+ * more roundabout methods.)
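+ * (For example, a qual like "WHERE x > 10 AND false" is const-folded to a
+ * single constant-FALSE clause, so this test lets the scan be skipped
+ * entirely; "x" here is just an illustrative column name.)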
+ */
+ if (list_length(rel->baserestrictinfo) == 1)
+ {
+ RestrictInfo *rinfo = (RestrictInfo *) linitial(rel->baserestrictinfo);
+ Expr *clause = rinfo->clause;
+
+ if (clause && IsA(clause, Const) &&
+ (((Const *) clause)->constisnull ||
+ !DatumGetBool(((Const *) clause)->constvalue)))
+ return true;
+ }
+
+ /*
+ * Skip further tests, depending on constraint_exclusion.
+ */
+ switch (constraint_exclusion)
+ {
+ case CONSTRAINT_EXCLUSION_OFF:
+ /* In 'off' mode, never make any further tests */
+ return false;
+
+ case CONSTRAINT_EXCLUSION_PARTITION:
+
+ /*
+ * When constraint_exclusion is set to 'partition' we only handle
+ * appendrel members. Partition pruning has already been applied,
+ * so there is no need to consider the rel's partition constraints
+ * here.
+ */
+ if (rel->reloptkind == RELOPT_OTHER_MEMBER_REL)
+ break; /* appendrel member, so process it */
+ return false;
+
+ case CONSTRAINT_EXCLUSION_ON:
+
+ /*
+ * In 'on' mode, always apply constraint exclusion. If we are
+ * considering a baserel that is a partition (i.e., it was
+ * directly named rather than expanded from a parent table), then
+ * its partition constraints haven't been considered yet, so
+ * include them in the processing here.
+ */
+ if (rel->reloptkind == RELOPT_BASEREL)
+ include_partition = true;
+ break; /* always try to exclude */
+ }
+
+ /*
+ * Check for self-contradictory restriction clauses. We dare not make
+ * deductions with non-immutable functions, but any immutable clauses that
+ * are self-contradictory allow us to conclude the scan is unnecessary.
+ *
+ * Note: strip off RestrictInfo because predicate_refuted_by() isn't
+ * expecting to see any in its predicate argument.
+ */
+ safe_restrictions = NIL;
+ foreach(lc, rel->baserestrictinfo)
+ {
+ RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
+
+ if (!contain_mutable_functions((Node *) rinfo->clause))
+ safe_restrictions = lappend(safe_restrictions, rinfo->clause);
+ }
+
+ /*
+ * We can use weak refutation here, since we're comparing restriction
+ * clauses with restriction clauses.
+ */
+ if (predicate_refuted_by(safe_restrictions, safe_restrictions, true))
+ return true;
+
+ /*
+ * Only plain relations have constraints, so stop here for other rtekinds.
+ */
+ if (rte->rtekind != RTE_RELATION)
+ return false;
+
+ /*
+ * If we are scanning just this table, we can use NO INHERIT constraints,
+ * but not if we're scanning its children too. (Note that partitioned
+ * tables should never have NO INHERIT constraints; but it's not necessary
+ * for us to assume that here.)
+ */
+ include_noinherit = !rte->inh;
+
+ /*
+ * Currently, attnotnull constraints must be treated as NO INHERIT unless
+ * this is a partitioned table. In future we might track their
+ * inheritance status more accurately, allowing this to be refined.
+ */
+ include_notnull = (!rte->inh || rte->relkind == RELKIND_PARTITIONED_TABLE);
+
+ /*
+ * Fetch the appropriate set of constraint expressions.
+ */
+ constraint_pred = get_relation_constraints(root, rte->relid, rel,
+ include_noinherit,
+ include_notnull,
+ include_partition);
+
+ /*
+ * We do not currently enforce that CHECK constraints contain only
+ * immutable functions, so it's necessary to check here. We daren't draw
+ * conclusions from plan-time evaluation of non-immutable functions. Since
+ * they're ANDed, we can just ignore any mutable constraints in the list,
+ * and reason about the rest.
+ */
+ safe_constraints = NIL;
+ foreach(lc, constraint_pred)
+ {
+ Node *pred = (Node *) lfirst(lc);
+
+ if (!contain_mutable_functions(pred))
+ safe_constraints = lappend(safe_constraints, pred);
+ }
+
+ /*
+ * The constraints are effectively ANDed together, so we can just try to
+ * refute the entire collection at once. This may allow us to make proofs
+ * that would fail if we took them individually.
+ *
+ * Note: we use rel->baserestrictinfo, not safe_restrictions as might seem
+ * an obvious optimization. Some of the clauses might be OR clauses that
+ * have volatile and nonvolatile subclauses, and it's OK to make
+ * deductions with the nonvolatile parts.
+ *
+ * We need strong refutation because we have to prove that the constraints
+ * would yield false, not just NULL.
+ */
+ if (predicate_refuted_by(safe_constraints, rel->baserestrictinfo, false))
+ return true;
+
+ return false;
+}
+
+
+/*
+ * build_physical_tlist
+ *
+ * Build a targetlist consisting of exactly the relation's user attributes,
+ * in order. The executor can special-case such tlists to avoid a projection
+ * step at runtime, so we use such tlists preferentially for scan nodes.
+ *
+ * Exception: if there are any dropped or missing columns, we punt and return
+ * NIL. Ideally we would like to handle these cases too. However this
+ * creates problems for ExecTypeFromTL, which may be asked to build a tupdesc
+ * for a tlist that includes vars of no-longer-existent types. In theory we
+ * could dig out the required info from the pg_attribute entries of the
+ * relation, but that data is not readily available to ExecTypeFromTL.
+ * For now, we don't apply the physical-tlist optimization when there are
+ * dropped cols.
+ *
+ * We also support building a "physical" tlist for subqueries, functions,
+ * values lists, table expressions, and CTEs, since the same optimization can
+ * occur in SubqueryScan, FunctionScan, ValuesScan, CteScan, TableFunc,
+ * NamedTuplestoreScan, and WorkTableScan nodes.
+ */
+List *
+build_physical_tlist(PlannerInfo *root, RelOptInfo *rel)
+{
+ List *tlist = NIL;
+ Index varno = rel->relid;
+ RangeTblEntry *rte = planner_rt_fetch(varno, root);
+ Relation relation;
+ Query *subquery;
+ Var *var;
+ ListCell *l;
+ int attrno,
+ numattrs;
+ List *colvars;
+
+ switch (rte->rtekind)
+ {
+ case RTE_RELATION:
+ /* Assume we already have adequate lock */
+ relation = table_open(rte->relid, NoLock);
+
+ numattrs = RelationGetNumberOfAttributes(relation);
+ for (attrno = 1; attrno <= numattrs; attrno++)
+ {
+ Form_pg_attribute att_tup = TupleDescAttr(relation->rd_att,
+ attrno - 1);
+
+ if (att_tup->attisdropped || att_tup->atthasmissing)
+ {
+ /* found a dropped or missing col, so punt */
+ tlist = NIL;
+ break;
+ }
+
+ var = makeVar(varno,
+ attrno,
+ att_tup->atttypid,
+ att_tup->atttypmod,
+ att_tup->attcollation,
+ 0);
+
+ tlist = lappend(tlist,
+ makeTargetEntry((Expr *) var,
+ attrno,
+ NULL,
+ false));
+ }
+
+ table_close(relation, NoLock);
+ break;
+
+ case RTE_SUBQUERY:
+ subquery = rte->subquery;
+ foreach(l, subquery->targetList)
+ {
+ TargetEntry *tle = (TargetEntry *) lfirst(l);
+
+ /*
+ * A resjunk column of the subquery can be reflected as
+ * resjunk in the physical tlist; we need not punt.
+ */
+ var = makeVarFromTargetEntry(varno, tle);
+
+ tlist = lappend(tlist,
+ makeTargetEntry((Expr *) var,
+ tle->resno,
+ NULL,
+ tle->resjunk));
+ }
+ break;
+
+ case RTE_FUNCTION:
+ case RTE_TABLEFUNC:
+ case RTE_VALUES:
+ case RTE_CTE:
+ case RTE_NAMEDTUPLESTORE:
+ case RTE_RESULT:
+ /* Not all of these can have dropped cols, but share code anyway */
+ expandRTE(rte, varno, 0, -1, true /* include dropped */ ,
+ NULL, &colvars);
+ foreach(l, colvars)
+ {
+ var = (Var *) lfirst(l);
+
+ /*
+ * A non-Var in expandRTE's output means a dropped column;
+ * must punt.
+ */
+ if (!IsA(var, Var))
+ {
+ tlist = NIL;
+ break;
+ }
+
+ tlist = lappend(tlist,
+ makeTargetEntry((Expr *) var,
+ var->varattno,
+ NULL,
+ false));
+ }
+ break;
+
+ default:
+ /* caller error */
+ elog(ERROR, "unsupported RTE kind %d in build_physical_tlist",
+ (int) rte->rtekind);
+ break;
+ }
+
+ return tlist;
+}
+
+/*
+ * build_index_tlist
+ *
+ * Build a targetlist representing the columns of the specified index.
+ * Each column is represented by a Var for the corresponding base-relation
+ * column, or an expression in base-relation Vars, as appropriate.
+ *
+ * There are never any dropped columns in indexes, so unlike
+ * build_physical_tlist, we need no failure case.
+ */
+static List *
+build_index_tlist(PlannerInfo *root, IndexOptInfo *index,
+ Relation heapRelation)
+{
+ List *tlist = NIL;
+ Index varno = index->rel->relid;
+ ListCell *indexpr_item;
+ int i;
+
+ indexpr_item = list_head(index->indexprs);
+ for (i = 0; i < index->ncolumns; i++)
+ {
+ int indexkey = index->indexkeys[i];
+ Expr *indexvar;
+
+ if (indexkey != 0)
+ {
+ /* simple column */
+ const FormData_pg_attribute *att_tup;
+
+ if (indexkey < 0)
+ att_tup = SystemAttributeDefinition(indexkey);
+ else
+ att_tup = TupleDescAttr(heapRelation->rd_att, indexkey - 1);
+
+ indexvar = (Expr *) makeVar(varno,
+ indexkey,
+ att_tup->atttypid,
+ att_tup->atttypmod,
+ att_tup->attcollation,
+ 0);
+ }
+ else
+ {
+ /* expression column */
+ if (indexpr_item == NULL)
+ elog(ERROR, "wrong number of index expressions");
+ indexvar = (Expr *) lfirst(indexpr_item);
+ indexpr_item = lnext(index->indexprs, indexpr_item);
+ }
+
+ tlist = lappend(tlist,
+ makeTargetEntry(indexvar,
+ i + 1,
+ NULL,
+ false));
+ }
+ if (indexpr_item != NULL)
+ elog(ERROR, "wrong number of index expressions");
+
+ return tlist;
+}
+
+/*
+ * restriction_selectivity
+ *
+ * Returns the selectivity of a specified restriction operator clause.
+ * This code executes registered procedures stored in the
+ * operator relation, by calling the function manager.
+ *
+ * See clause_selectivity() for the meaning of the additional parameters.
+ */
+Selectivity
+restriction_selectivity(PlannerInfo *root,
+ Oid operatorid,
+ List *args,
+ Oid inputcollid,
+ int varRelid)
+{
+ RegProcedure oprrest = get_oprrest(operatorid);
+ float8 result;
+
+ /*
+ * if the oprrest procedure is missing for whatever reason, use a
+ * selectivity of 0.5
+ */
+ if (!oprrest)
+ return (Selectivity) 0.5;
+
+ result = DatumGetFloat8(OidFunctionCall4Coll(oprrest,
+ inputcollid,
+ PointerGetDatum(root),
+ ObjectIdGetDatum(operatorid),
+ PointerGetDatum(args),
+ Int32GetDatum(varRelid)));
+
+ if (result < 0.0 || result > 1.0)
+ elog(ERROR, "invalid restriction selectivity: %f", result);
+
+ return (Selectivity) result;
+}
+
+/*
+ * join_selectivity
+ *
+ * Returns the selectivity of a specified join operator clause.
+ * This code executes registered procedures stored in the
+ * operator relation, by calling the function manager.
+ *
+ * See clause_selectivity() for the meaning of the additional parameters.
+ */
+Selectivity
+join_selectivity(PlannerInfo *root,
+ Oid operatorid,
+ List *args,
+ Oid inputcollid,
+ JoinType jointype,
+ SpecialJoinInfo *sjinfo)
+{
+ RegProcedure oprjoin = get_oprjoin(operatorid);
+ float8 result;
+
+ /*
+ * if the oprjoin procedure is missing for whatever reason, use a
+ * selectivity of 0.5
+ */
+ if (!oprjoin)
+ return (Selectivity) 0.5;
+
+ result = DatumGetFloat8(OidFunctionCall5Coll(oprjoin,
+ inputcollid,
+ PointerGetDatum(root),
+ ObjectIdGetDatum(operatorid),
+ PointerGetDatum(args),
+ Int16GetDatum(jointype),
+ PointerGetDatum(sjinfo)));
+
+ if (result < 0.0 || result > 1.0)
+ elog(ERROR, "invalid join selectivity: %f", result);
+
+ return (Selectivity) result;
+}
+
+/*
+ * function_selectivity
+ *
+ * Returns the selectivity of a specified boolean function clause.
+ * This code executes registered procedures stored in the
+ * pg_proc relation, by calling the function manager.
+ *
+ * See clause_selectivity() for the meaning of the additional parameters.
+ */
+Selectivity
+function_selectivity(PlannerInfo *root,
+ Oid funcid,
+ List *args,
+ Oid inputcollid,
+ bool is_join,
+ int varRelid,
+ JoinType jointype,
+ SpecialJoinInfo *sjinfo)
+{
+ RegProcedure prosupport = get_func_support(funcid);
+ SupportRequestSelectivity req;
+ SupportRequestSelectivity *sresult;
+
+ /*
+ * If no support function is provided, use our historical default
+ * estimate, 0.3333333. This seems a pretty unprincipled choice, but
+ * Postgres has been using that estimate for function calls since 1992.
+ * The hoariness of this behavior suggests that we should not be in too
+ * much hurry to use another value.
+ */
+ if (!prosupport)
+ return (Selectivity) 0.3333333;
+
+ req.type = T_SupportRequestSelectivity;
+ req.root = root;
+ req.funcid = funcid;
+ req.args = args;
+ req.inputcollid = inputcollid;
+ req.is_join = is_join;
+ req.varRelid = varRelid;
+ req.jointype = jointype;
+ req.sjinfo = sjinfo;
+ req.selectivity = -1; /* to catch failure to set the value */
+
+ sresult = (SupportRequestSelectivity *)
+ DatumGetPointer(OidFunctionCall1(prosupport,
+ PointerGetDatum(&req)));
+
+ /* If support function fails, use default */
+ if (sresult != &req)
+ return (Selectivity) 0.3333333;
+
+ if (req.selectivity < 0.0 || req.selectivity > 1.0)
+ elog(ERROR, "invalid function selectivity: %f", req.selectivity);
+
+ return (Selectivity) req.selectivity;
+}
+
+/*
+ * add_function_cost
+ *
+ * Get an estimate of the execution cost of a function, and *add* it to
+ * the contents of *cost. The estimate may include both one-time and
+ * per-tuple components, since QualCost does.
+ *
+ * The funcid must always be supplied. If it is being called as the
+ * implementation of a specific parsetree node (FuncExpr, OpExpr,
+ * WindowFunc, etc), pass that as "node", else pass NULL.
+ *
+ * In some usages root might be NULL, too.
+ */
+void
+add_function_cost(PlannerInfo *root, Oid funcid, Node *node,
+ QualCost *cost)
+{
+ HeapTuple proctup;
+ Form_pg_proc procform;
+
+ proctup = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid));
+ if (!HeapTupleIsValid(proctup))
+ elog(ERROR, "cache lookup failed for function %u", funcid);
+ procform = (Form_pg_proc) GETSTRUCT(proctup);
+
+ if (OidIsValid(procform->prosupport))
+ {
+ SupportRequestCost req;
+ SupportRequestCost *sresult;
+
+ req.type = T_SupportRequestCost;
+ req.root = root;
+ req.funcid = funcid;
+ req.node = node;
+
+ /* Initialize cost fields so that support function doesn't have to */
+ req.startup = 0;
+ req.per_tuple = 0;
+
+ sresult = (SupportRequestCost *)
+ DatumGetPointer(OidFunctionCall1(procform->prosupport,
+ PointerGetDatum(&req)));
+
+ if (sresult == &req)
+ {
+ /* Success, so accumulate support function's estimate into *cost */
+ cost->startup += req.startup;
+ cost->per_tuple += req.per_tuple;
+ ReleaseSysCache(proctup);
+ return;
+ }
+ }
+
+ /* No support function, or it failed, so rely on procost */
+ cost->per_tuple += procform->procost * cpu_operator_cost;
+
+ ReleaseSysCache(proctup);
+}
+
+/*
+ * get_function_rows
+ *
+ * Get an estimate of the number of rows returned by a set-returning function.
+ *
+ * The funcid must always be supplied. In current usage, the calling node
+ * will always be supplied, and will be either a FuncExpr or OpExpr.
+ * But it's a good idea to not fail if it's NULL.
+ *
+ * In some usages root might be NULL, too.
+ *
+ * Note: this returns the unfiltered result of the support function, if any.
+ * It's usually a good idea to apply clamp_row_est() to the result, but we
+ * leave it to the caller to do so.
+ */
+double
+get_function_rows(PlannerInfo *root, Oid funcid, Node *node)
+{
+ HeapTuple proctup;
+ Form_pg_proc procform;
+ double result;
+
+ proctup = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid));
+ if (!HeapTupleIsValid(proctup))
+ elog(ERROR, "cache lookup failed for function %u", funcid);
+ procform = (Form_pg_proc) GETSTRUCT(proctup);
+
+ Assert(procform->proretset); /* else caller error */
+
+ if (OidIsValid(procform->prosupport))
+ {
+ SupportRequestRows req;
+ SupportRequestRows *sresult;
+
+ req.type = T_SupportRequestRows;
+ req.root = root;
+ req.funcid = funcid;
+ req.node = node;
+
+ req.rows = 0; /* just for sanity */
+
+ sresult = (SupportRequestRows *)
+ DatumGetPointer(OidFunctionCall1(procform->prosupport,
+ PointerGetDatum(&req)));
+
+ if (sresult == &req)
+ {
+ /* Success */
+ ReleaseSysCache(proctup);
+ return req.rows;
+ }
+ }
+
+ /* No support function, or it failed, so rely on prorows */
+ result = procform->prorows;
+
+ ReleaseSysCache(proctup);
+
+ return result;
+}
+
+/*
+ * has_unique_index
+ *
+ * Detect whether there is a unique index on the specified attribute
+ * of the specified relation, thus allowing us to conclude that all
+ * the (non-null) values of the attribute are distinct.
+ *
+ * This function does not check the index's indimmediate property, which
+ * means that uniqueness may transiently fail to hold intra-transaction.
+ * That's appropriate when we are making statistical estimates, but beware
+ * of using this for any correctness proofs.
+ */
+bool
+has_unique_index(RelOptInfo *rel, AttrNumber attno)
+{
+ ListCell *ilist;
+
+ foreach(ilist, rel->indexlist)
+ {
+ IndexOptInfo *index = (IndexOptInfo *) lfirst(ilist);
+
+ /*
+ * Note: ignore partial indexes, since they don't allow us to conclude
+ * that all attr values are distinct, *unless* they are marked predOK
+ * which means we know the index's predicate is satisfied by the
+ * query. We don't take any interest in expressional indexes either.
+ * Also, a multicolumn unique index doesn't allow us to conclude that
+ * just the specified attr is unique.
+ */
+ if (index->unique &&
+ index->nkeycolumns == 1 &&
+ index->indexkeys[0] == attno &&
+ (index->indpred == NIL || index->predOK))
+ return true;
+ }
+ return false;
+}
+
+
+/*
+ * has_row_triggers
+ *
+ * Detect whether the specified relation has any row-level triggers for event.
+ */
+bool
+has_row_triggers(PlannerInfo *root, Index rti, CmdType event)
+{
+ RangeTblEntry *rte = planner_rt_fetch(rti, root);
+ Relation relation;
+ TriggerDesc *trigDesc;
+ bool result = false;
+
+ /* Assume we already have adequate lock */
+ relation = table_open(rte->relid, NoLock);
+
+ trigDesc = relation->trigdesc;
+ switch (event)
+ {
+ case CMD_INSERT:
+ if (trigDesc &&
+ (trigDesc->trig_insert_after_row ||
+ trigDesc->trig_insert_before_row))
+ result = true;
+ break;
+ case CMD_UPDATE:
+ if (trigDesc &&
+ (trigDesc->trig_update_after_row ||
+ trigDesc->trig_update_before_row))
+ result = true;
+ break;
+ case CMD_DELETE:
+ if (trigDesc &&
+ (trigDesc->trig_delete_after_row ||
+ trigDesc->trig_delete_before_row))
+ result = true;
+ break;
+ /* There is no separate event for MERGE, only INSERT/UPDATE/DELETE */
+ case CMD_MERGE:
+ result = false;
+ break;
+ default:
+ elog(ERROR, "unrecognized CmdType: %d", (int) event);
+ break;
+ }
+
+ table_close(relation, NoLock);
+ return result;
+}
+
+/*
+ * has_stored_generated_columns
+ *
+ * Does table identified by RTI have any STORED GENERATED columns?
+ */
+bool
+has_stored_generated_columns(PlannerInfo *root, Index rti)
+{
+ RangeTblEntry *rte = planner_rt_fetch(rti, root);
+ Relation relation;
+ TupleDesc tupdesc;
+ bool result = false;
+
+ /* Assume we already have adequate lock */
+ relation = table_open(rte->relid, NoLock);
+
+ tupdesc = RelationGetDescr(relation);
+ result = tupdesc->constr && tupdesc->constr->has_generated_stored;
+
+ table_close(relation, NoLock);
+
+ return result;
+}
+
+/*
+ * get_dependent_generated_columns
+ *
+ * Get the column numbers of any STORED GENERATED columns of the relation
+ * that depend on any column listed in target_cols. Both the input and
+ * result bitmapsets contain column numbers offset by
+ * FirstLowInvalidHeapAttributeNumber.
+ */
+Bitmapset *
+get_dependent_generated_columns(PlannerInfo *root, Index rti,
+ Bitmapset *target_cols)
+{
+ Bitmapset *dependentCols = NULL;
+ RangeTblEntry *rte = planner_rt_fetch(rti, root);
+ Relation relation;
+ TupleDesc tupdesc;
+ TupleConstr *constr;
+
+ /* Assume we already have adequate lock */
+ relation = table_open(rte->relid, NoLock);
+
+ tupdesc = RelationGetDescr(relation);
+ constr = tupdesc->constr;
+
+ if (constr && constr->has_generated_stored)
+ {
+ for (int i = 0; i < constr->num_defval; i++)
+ {
+ AttrDefault *defval = &constr->defval[i];
+ Node *expr;
+ Bitmapset *attrs_used = NULL;
+
+ /* skip if not generated column */
+ if (!TupleDescAttr(tupdesc, defval->adnum - 1)->attgenerated)
+ continue;
+
+ /* identify columns this generated column depends on */
+ expr = stringToNode(defval->adbin);
+ pull_varattnos(expr, 1, &attrs_used);
+
+ if (bms_overlap(target_cols, attrs_used))
+ dependentCols = bms_add_member(dependentCols,
+ defval->adnum - FirstLowInvalidHeapAttributeNumber);
+ }
+ }
+
+ table_close(relation, NoLock);
+
+ return dependentCols;
+}
+
+/*
+ * set_relation_partition_info
+ *
+ * Set partitioning scheme and related information for a partitioned table.
+ */
+static void
+set_relation_partition_info(PlannerInfo *root, RelOptInfo *rel,
+ Relation relation)
+{
+ PartitionDesc partdesc;
+
+ /*
+ * Create the PartitionDirectory infrastructure if we didn't already.
+ */
+ if (root->glob->partition_directory == NULL)
+ {
+ root->glob->partition_directory =
+ CreatePartitionDirectory(CurrentMemoryContext, true);
+ }
+
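+ /* Look up the partition descriptor and the canonical PartitionScheme for this rel */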
+ partdesc = PartitionDirectoryLookup(root->glob->partition_directory,
+ relation);
+ rel->part_scheme = find_partition_scheme(root, relation);
+ Assert(partdesc != NULL && rel->part_scheme != NULL);
+ rel->boundinfo = partdesc->boundinfo;
+ rel->nparts = partdesc->nparts;
+ set_baserel_partition_key_exprs(relation, rel);
+ set_baserel_partition_constraint(relation, rel);
+}
+
+/*
+ * find_partition_scheme
+ *
+ * Find or create a PartitionScheme for this Relation.
+ */
+static PartitionScheme
+find_partition_scheme(PlannerInfo *root, Relation relation)
+{
+ PartitionKey partkey = RelationGetPartitionKey(relation);
+ ListCell *lc;
+ int partnatts,
+ i;
+ PartitionScheme part_scheme;
+
+ /* A partitioned table should have a partition key. */
+ Assert(partkey != NULL);
+
+ partnatts = partkey->partnatts;
+
+ /* Search for a matching partition scheme and return if found one. */
+ foreach(lc, root->part_schemes)
+ {
+ part_scheme = lfirst(lc);
+
+ /* Match partitioning strategy and number of keys. */
+ if (partkey->strategy != part_scheme->strategy ||
+ partnatts != part_scheme->partnatts)
+ continue;
+
+ /* Match partition key type properties. */
+ if (memcmp(partkey->partopfamily, part_scheme->partopfamily,
+ sizeof(Oid) * partnatts) != 0 ||
+ memcmp(partkey->partopcintype, part_scheme->partopcintype,
+ sizeof(Oid) * partnatts) != 0 ||
+ memcmp(partkey->partcollation, part_scheme->partcollation,
+ sizeof(Oid) * partnatts) != 0)
+ continue;
+
+ /*
+ * Length and byval information should match when partopcintype
+ * matches.
+ */
+ Assert(memcmp(partkey->parttyplen, part_scheme->parttyplen,
+ sizeof(int16) * partnatts) == 0);
+ Assert(memcmp(partkey->parttypbyval, part_scheme->parttypbyval,
+ sizeof(bool) * partnatts) == 0);
+
+ /*
+ * If partopfamily and partopcintype matched, must have the same
+ * partition comparison functions. Note that we cannot reliably
+ * Assert the equality of function structs themselves for they might
+ * be different across PartitionKey's, so just Assert for the function
+ * OIDs.
+ */
+#ifdef USE_ASSERT_CHECKING
+ for (i = 0; i < partkey->partnatts; i++)
+ Assert(partkey->partsupfunc[i].fn_oid ==
+ part_scheme->partsupfunc[i].fn_oid);
+#endif
+
+ /* Found matching partition scheme. */
+ return part_scheme;
+ }
+
+ /*
+ * Did not find matching partition scheme. Create one copying relevant
+ * information from the relcache. We need to copy the contents of the
+ * array since the relcache entry may not survive after we have closed the
+ * relation.
+ */
+ part_scheme = (PartitionScheme) palloc0(sizeof(PartitionSchemeData));
+ part_scheme->strategy = partkey->strategy;
+ part_scheme->partnatts = partkey->partnatts;
+
+ part_scheme->partopfamily = (Oid *) palloc(sizeof(Oid) * partnatts);
+ memcpy(part_scheme->partopfamily, partkey->partopfamily,
+ sizeof(Oid) * partnatts);
+
+ part_scheme->partopcintype = (Oid *) palloc(sizeof(Oid) * partnatts);
+ memcpy(part_scheme->partopcintype, partkey->partopcintype,
+ sizeof(Oid) * partnatts);
+
+ part_scheme->partcollation = (Oid *) palloc(sizeof(Oid) * partnatts);
+ memcpy(part_scheme->partcollation, partkey->partcollation,
+ sizeof(Oid) * partnatts);
+
+ part_scheme->parttyplen = (int16 *) palloc(sizeof(int16) * partnatts);
+ memcpy(part_scheme->parttyplen, partkey->parttyplen,
+ sizeof(int16) * partnatts);
+
+ part_scheme->parttypbyval = (bool *) palloc(sizeof(bool) * partnatts);
+ memcpy(part_scheme->parttypbyval, partkey->parttypbyval,
+ sizeof(bool) * partnatts);
+
+ part_scheme->partsupfunc = (FmgrInfo *)
+ palloc(sizeof(FmgrInfo) * partnatts);
+ for (i = 0; i < partnatts; i++)
+ fmgr_info_copy(&part_scheme->partsupfunc[i], &partkey->partsupfunc[i],
+ CurrentMemoryContext);
+
+ /* Add the partitioning scheme to PlannerInfo. */
+ root->part_schemes = lappend(root->part_schemes, part_scheme);
+
+ return part_scheme;
+}
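+
+/*
+ * For illustration (hypothetical tables, not drawn from the code above): two
+ * tables each declared PARTITION BY RANGE (id), with "id" an int4 column
+ * using the default btree operator class and no collation, match on
+ * strategy, key count, opfamily, opcintype and collation, so both end up
+ * pointing at a single PartitionScheme in root->part_schemes.  Sharing a
+ * scheme is what lets the planner later treat such relations as partitioned
+ * the same way.
+ */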
+
+/*
+ * set_baserel_partition_key_exprs
+ *
+ * Builds partition key expressions for the given base relation and fills
+ * rel->partexprs.
+ */
+static void
+set_baserel_partition_key_exprs(Relation relation,
+ RelOptInfo *rel)
+{
+ PartitionKey partkey = RelationGetPartitionKey(relation);
+ int partnatts;
+ int cnt;
+ List **partexprs;
+ ListCell *lc;
+ Index varno = rel->relid;
+
+ Assert(IS_SIMPLE_REL(rel) && rel->relid > 0);
+
+ /* A partitioned table should have a partition key. */
+ Assert(partkey != NULL);
+
+ partnatts = partkey->partnatts;
+ partexprs = (List **) palloc(sizeof(List *) * partnatts);
+ lc = list_head(partkey->partexprs);
+
+ for (cnt = 0; cnt < partnatts; cnt++)
+ {
+ Expr *partexpr;
+ AttrNumber attno = partkey->partattrs[cnt];
+
+ if (attno != InvalidAttrNumber)
+ {
+ /* Single column partition key is stored as a Var node. */
+ Assert(attno > 0);
+
+ partexpr = (Expr *) makeVar(varno, attno,
+ partkey->parttypid[cnt],
+ partkey->parttypmod[cnt],
+ partkey->parttypcoll[cnt], 0);
+ }
+ else
+ {
+ if (lc == NULL)
+ elog(ERROR, "wrong number of partition key expressions");
+
+ /* Re-stamp the expression with given varno. */
+ partexpr = (Expr *) copyObject(lfirst(lc));
+ ChangeVarNodes((Node *) partexpr, 1, varno, 0);
+ lc = lnext(partkey->partexprs, lc);
+ }
+
+ /* Base relations have a single expression per key. */
+ partexprs[cnt] = list_make1(partexpr);
+ }
+
+ rel->partexprs = partexprs;
+
+ /*
+ * A base relation does not have nullable partition key expressions, since
+ * no outer join is involved. We still allocate an array of empty
+ * expression lists to keep partition key expression handling code simple.
+ * See build_joinrel_partition_info() and match_expr_to_partition_keys().
+ */
+ rel->nullable_partexprs = (List **) palloc0(sizeof(List *) * partnatts);
+}
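+
+/*
+ * As a hypothetical example: for a table partitioned BY RANGE (a, (a + b)),
+ * partexprs[0] ends up as a one-element list holding a Var for column "a"
+ * carrying this rel's varno, and partexprs[1] holds the expression "a + b"
+ * copied from the partition key and re-stamped with the same varno.
+ */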
+
+/*
+ * set_baserel_partition_constraint
+ *
+ * Builds the partition constraint for the given base relation and sets it
+ * in the given RelOptInfo. All Var nodes are restamped with the relid of the
+ * given relation.
+ */
+static void
+set_baserel_partition_constraint(Relation relation, RelOptInfo *rel)
+{
+ List *partconstr;
+
+ if (rel->partition_qual) /* already done */
+ return;
+
+ /*
+ * Run the partition quals through const-simplification similar to check
+ * constraints. We skip canonicalize_qual, though, because partition
+ * quals should be in canonical form already; also, since the qual is in
+ * implicit-AND format, we'd have to explicitly convert it to explicit-AND
+ * format and back again.
+ */
+ partconstr = RelationGetPartitionQual(relation);
+ if (partconstr)
+ {
+ partconstr = (List *) expression_planner((Expr *) partconstr);
+ if (rel->relid != 1)
+ ChangeVarNodes((Node *) partconstr, 1, rel->relid, 0);
+ rel->partition_qual = partconstr;
+ }
+}
diff --git a/src/backend/optimizer/util/predtest.c b/src/backend/optimizer/util/predtest.c
new file mode 100644
index 0000000..182d5b1
--- /dev/null
+++ b/src/backend/optimizer/util/predtest.c
@@ -0,0 +1,2224 @@
+/*-------------------------------------------------------------------------
+ *
+ * predtest.c
+ * Routines to attempt to prove logical implications between predicate
+ * expressions.
+ *
+ * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ * src/backend/optimizer/util/predtest.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "catalog/pg_proc.h"
+#include "catalog/pg_type.h"
+#include "executor/executor.h"
+#include "miscadmin.h"
+#include "nodes/makefuncs.h"
+#include "nodes/nodeFuncs.h"
+#include "nodes/pathnodes.h"
+#include "optimizer/optimizer.h"
+#include "utils/array.h"
+#include "utils/inval.h"
+#include "utils/lsyscache.h"
+#include "utils/syscache.h"
+
+
+/*
+ * Proof attempts involving large arrays in ScalarArrayOpExpr nodes are
+ * likely to require O(N^2) time, and more often than not fail anyway.
+ * So we set an arbitrary limit on the number of array elements that
+ * we will allow to be treated as an AND or OR clause.
+ * XXX is it worth exposing this as a GUC knob?
+ */
+#define MAX_SAOP_ARRAY_SIZE 100
+
+/*
+ * To avoid redundant coding in predicate_implied_by_recurse and
+ * predicate_refuted_by_recurse, we need to abstract out the notion of
+ * iterating over the components of an expression that is logically an AND
+ * or OR structure. There are multiple sorts of expression nodes that can
+ * be treated as ANDs or ORs, and we don't want to code each one separately.
+ * Hence, these types and support routines.
+ */
+typedef enum
+{
+ CLASS_ATOM, /* expression that's not AND or OR */
+ CLASS_AND, /* expression with AND semantics */
+ CLASS_OR /* expression with OR semantics */
+} PredClass;
+
+typedef struct PredIterInfoData *PredIterInfo;
+
+typedef struct PredIterInfoData
+{
+ /* node-type-specific iteration state */
+ void *state;
+ List *state_list;
+ /* initialize to do the iteration */
+ void (*startup_fn) (Node *clause, PredIterInfo info);
+ /* next-component iteration function */
+ Node *(*next_fn) (PredIterInfo info);
+ /* release resources when done with iteration */
+ void (*cleanup_fn) (PredIterInfo info);
+} PredIterInfoData;
+
+#define iterate_begin(item, clause, info) \
+ do { \
+ Node *item; \
+ (info).startup_fn((clause), &(info)); \
+ while ((item = (info).next_fn(&(info))) != NULL)
+
+#define iterate_end(info) \
+ (info).cleanup_fn(&(info)); \
+ } while (0)
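+
+/*
+ * A minimal usage sketch of these macros (the names "clause", "item" and
+ * "info" are placeholders): classify the node to fill in the iteration
+ * callbacks, then loop over its AND/OR components.
+ *
+ *		PredIterInfoData info;
+ *
+ *		if (predicate_classify(clause, &info) != CLASS_ATOM)
+ *		{
+ *			iterate_begin(item, clause, info)
+ *			{
+ *				... examine each component "item" ...
+ *			}
+ *			iterate_end(info);
+ *		}
+ */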
+
+
+static bool predicate_implied_by_recurse(Node *clause, Node *predicate,
+ bool weak);
+static bool predicate_refuted_by_recurse(Node *clause, Node *predicate,
+ bool weak);
+static PredClass predicate_classify(Node *clause, PredIterInfo info);
+static void list_startup_fn(Node *clause, PredIterInfo info);
+static Node *list_next_fn(PredIterInfo info);
+static void list_cleanup_fn(PredIterInfo info);
+static void boolexpr_startup_fn(Node *clause, PredIterInfo info);
+static void arrayconst_startup_fn(Node *clause, PredIterInfo info);
+static Node *arrayconst_next_fn(PredIterInfo info);
+static void arrayconst_cleanup_fn(PredIterInfo info);
+static void arrayexpr_startup_fn(Node *clause, PredIterInfo info);
+static Node *arrayexpr_next_fn(PredIterInfo info);
+static void arrayexpr_cleanup_fn(PredIterInfo info);
+static bool predicate_implied_by_simple_clause(Expr *predicate, Node *clause,
+ bool weak);
+static bool predicate_refuted_by_simple_clause(Expr *predicate, Node *clause,
+ bool weak);
+static Node *extract_not_arg(Node *clause);
+static Node *extract_strong_not_arg(Node *clause);
+static bool clause_is_strict_for(Node *clause, Node *subexpr, bool allow_false);
+static bool operator_predicate_proof(Expr *predicate, Node *clause,
+ bool refute_it, bool weak);
+static bool operator_same_subexprs_proof(Oid pred_op, Oid clause_op,
+ bool refute_it);
+static bool operator_same_subexprs_lookup(Oid pred_op, Oid clause_op,
+ bool refute_it);
+static Oid get_btree_test_op(Oid pred_op, Oid clause_op, bool refute_it);
+static void InvalidateOprProofCacheCallBack(Datum arg, int cacheid, uint32 hashvalue);
+
+
+/*
+ * predicate_implied_by
+ * Recursively checks whether the clauses in clause_list imply that the
+ * given predicate is true.
+ *
+ * We support two definitions of implication:
+ *
+ * "Strong" implication: A implies B means that truth of A implies truth of B.
+ * We use this to prove that a row satisfying one WHERE clause or index
+ * predicate must satisfy another one.
+ *
+ * "Weak" implication: A implies B means that non-falsity of A implies
+ * non-falsity of B ("non-false" means "either true or NULL"). We use this to
+ * prove that a row satisfying one CHECK constraint must satisfy another one.
+ *
+ * Strong implication can also be used to prove that a WHERE clause implies a
+ * CHECK constraint, although it will fail to prove a few cases where we could
+ * safely conclude that the implication holds. There's no support for proving
+ * the converse case, since only a few kinds of CHECK constraint would allow
+ * deducing anything.
+ *
+ * The top-level List structure of each list corresponds to an AND list.
+ * We assume that eval_const_expressions() has been applied and so there
+ * are no un-flattened ANDs or ORs (e.g., no AND immediately within an AND,
+ * including AND just below the top-level List structure).
+ * If this is not true we might fail to prove an implication that is
+ * valid, but no worse consequences will ensue.
+ *
+ * We assume the predicate has already been checked to contain only
+ * immutable functions and operators. (In many current uses this is known
+ * true because the predicate is part of an index predicate that has passed
+ * CheckPredicate(); otherwise, the caller must check it.) We dare not make
+ * deductions based on non-immutable functions, because they might change
+ * answers between the time we make the plan and the time we execute the plan.
+ * Immutability of functions in the clause_list is checked here, if necessary.
+ */
+bool
+predicate_implied_by(List *predicate_list, List *clause_list,
+ bool weak)
+{
+ Node *p,
+ *c;
+
+ if (predicate_list == NIL)
+ return true; /* no predicate: implication is vacuous */
+ if (clause_list == NIL)
+ return false; /* no restriction: implication must fail */
+
+ /*
+ * If either input is a single-element list, replace it with its lone
+ * member; this avoids one useless level of AND-recursion. We only need
+ * to worry about this at top level, since eval_const_expressions should
+ * have gotten rid of any trivial ANDs or ORs below that.
+ */
+ if (list_length(predicate_list) == 1)
+ p = (Node *) linitial(predicate_list);
+ else
+ p = (Node *) predicate_list;
+ if (list_length(clause_list) == 1)
+ c = (Node *) linitial(clause_list);
+ else
+ c = (Node *) clause_list;
+
+ /* And away we go ... */
+ return predicate_implied_by_recurse(c, p, weak);
+}
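+
+/*
+ * Illustrative examples (with a hypothetical column "x"):
+ *
+ * The clause list for WHERE x > 10 strongly implies the partial-index
+ * predicate "x > 5", since any row with x > 10 certainly has x > 5.
+ *
+ * The clause "x > 0" strongly implies "x IS NOT NULL" (it cannot be true
+ * unless x is non-null), but it does not weakly imply it: when x is NULL the
+ * clause is merely non-false (NULL) while the predicate is outright false.
+ */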
+
+/*
+ * predicate_refuted_by
+ * Recursively checks whether the clauses in clause_list refute the given
+ * predicate (that is, prove it false).
+ *
+ * This is NOT the same as !(predicate_implied_by), though it is similar
+ * in the technique and structure of the code.
+ *
+ * We support two definitions of refutation:
+ *
+ * "Strong" refutation: A refutes B means truth of A implies falsity of B.
+ * We use this to disprove a CHECK constraint given a WHERE clause, i.e.,
+ * prove that any row satisfying the WHERE clause would violate the CHECK
+ * constraint. (Observe we must prove B yields false, not just not-true.)
+ *
+ * "Weak" refutation: A refutes B means truth of A implies non-truth of B
+ * (i.e., B must yield false or NULL). We use this to detect mutually
+ * contradictory WHERE clauses.
+ *
+ * Weak refutation can be proven in some cases where strong refutation doesn't
+ * hold, so it's useful to use it when possible. We don't currently have
+ * support for disproving one CHECK constraint based on another one, nor for
+ * disproving WHERE based on CHECK. (As with implication, the last case
+ * doesn't seem very practical. CHECK-vs-CHECK might be useful, but isn't
+ * currently needed anywhere.)
+ *
+ * The top-level List structure of each list corresponds to an AND list.
+ * We assume that eval_const_expressions() has been applied and so there
+ * are no un-flattened ANDs or ORs (e.g., no AND immediately within an AND,
+ * including AND just below the top-level List structure).
+ * If this is not true we might fail to prove an implication that is
+ * valid, but no worse consequences will ensue.
+ *
+ * We assume the predicate has already been checked to contain only
+ * immutable functions and operators. We dare not make deductions based on
+ * non-immutable functions, because they might change answers between the
+ * time we make the plan and the time we execute the plan.
+ * Immutability of functions in the clause_list is checked here, if necessary.
+ */
+bool
+predicate_refuted_by(List *predicate_list, List *clause_list,
+ bool weak)
+{
+ Node *p,
+ *c;
+
+ if (predicate_list == NIL)
+ return false; /* no predicate: no refutation is possible */
+ if (clause_list == NIL)
+ return false; /* no restriction: refutation must fail */
+
+ /*
+ * If either input is a single-element list, replace it with its lone
+ * member; this avoids one useless level of AND-recursion. We only need
+ * to worry about this at top level, since eval_const_expressions should
+ * have gotten rid of any trivial ANDs or ORs below that.
+ */
+ if (list_length(predicate_list) == 1)
+ p = (Node *) linitial(predicate_list);
+ else
+ p = (Node *) predicate_list;
+ if (list_length(clause_list) == 1)
+ c = (Node *) linitial(clause_list);
+ else
+ c = (Node *) clause_list;
+
+ /* And away we go ... */
+ return predicate_refuted_by_recurse(c, p, weak);
+}
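+
+/*
+ * Illustrative examples (with a hypothetical column "x"):
+ *
+ * The WHERE clause "x < 5" strongly refutes the CHECK constraint "x > 10",
+ * since any row satisfying the clause makes the constraint false.
+ *
+ * The clauses "x = 1" and "x = 2" weakly refute each other, which is how a
+ * WHERE condition containing both can be recognized as self-contradictory.
+ */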
+
+/*----------
+ * predicate_implied_by_recurse
+ * Does the predicate implication test for non-NULL restriction and
+ * predicate clauses.
+ *
+ * The logic followed here is ("=>" means "implies"):
+ * atom A => atom B iff: predicate_implied_by_simple_clause says so
+ * atom A => AND-expr B iff: A => each of B's components
+ * atom A => OR-expr B iff: A => any of B's components
+ * AND-expr A => atom B iff: any of A's components => B
+ * AND-expr A => AND-expr B iff: A => each of B's components
+ * AND-expr A => OR-expr B iff: A => any of B's components,
+ * *or* any of A's components => B
+ * OR-expr A => atom B iff: each of A's components => B
+ * OR-expr A => AND-expr B iff: A => each of B's components
+ * OR-expr A => OR-expr B iff: each of A's components => any of B's
+ *
+ * An "atom" is anything other than an AND or OR node. Notice that we don't
+ * have any special logic to handle NOT nodes; these should have been pushed
+ * down or eliminated where feasible during eval_const_expressions().
+ *
+ * All of these rules apply equally to strong or weak implication.
+ *
+ * We can't recursively expand either side first, but have to interleave
+ * the expansions per the above rules, to be sure we handle all of these
+ * examples:
+ * (x OR y) => (x OR y OR z)
+ * (x AND y AND z) => (x AND y)
+ * (x AND y) => ((x AND y) OR z)
+ * ((x OR y) AND z) => (x OR y)
+ * This is still not an exhaustive test, but it handles most normal cases
+ * under the assumption that both inputs have been AND/OR flattened.
+ *
+ * We have to be prepared to handle RestrictInfo nodes in the restrictinfo
+ * tree, though not in the predicate tree.
+ *----------
+ */
+static bool
+predicate_implied_by_recurse(Node *clause, Node *predicate,
+ bool weak)
+{
+ PredIterInfoData clause_info;
+ PredIterInfoData pred_info;
+ PredClass pclass;
+ bool result;
+
+ /* skip through RestrictInfo */
+ Assert(clause != NULL);
+ if (IsA(clause, RestrictInfo))
+ clause = (Node *) ((RestrictInfo *) clause)->clause;
+
+ pclass = predicate_classify(predicate, &pred_info);
+
+ switch (predicate_classify(clause, &clause_info))
+ {
+ case CLASS_AND:
+ switch (pclass)
+ {
+ case CLASS_AND:
+
+ /*
+ * AND-clause => AND-clause if A implies each of B's items
+ */
+ result = true;
+ iterate_begin(pitem, predicate, pred_info)
+ {
+ if (!predicate_implied_by_recurse(clause, pitem,
+ weak))
+ {
+ result = false;
+ break;
+ }
+ }
+ iterate_end(pred_info);
+ return result;
+
+ case CLASS_OR:
+
+ /*
+ * AND-clause => OR-clause if A implies any of B's items
+ *
+ * Needed to handle (x AND y) => ((x AND y) OR z)
+ */
+ result = false;
+ iterate_begin(pitem, predicate, pred_info)
+ {
+ if (predicate_implied_by_recurse(clause, pitem,
+ weak))
+ {
+ result = true;
+ break;
+ }
+ }
+ iterate_end(pred_info);
+ if (result)
+ return result;
+
+ /*
+ * Also check if any of A's items implies B
+ *
+ * Needed to handle ((x OR y) AND z) => (x OR y)
+ */
+ iterate_begin(citem, clause, clause_info)
+ {
+ if (predicate_implied_by_recurse(citem, predicate,
+ weak))
+ {
+ result = true;
+ break;
+ }
+ }
+ iterate_end(clause_info);
+ return result;
+
+ case CLASS_ATOM:
+
+ /*
+ * AND-clause => atom if any of A's items implies B
+ */
+ result = false;
+ iterate_begin(citem, clause, clause_info)
+ {
+ if (predicate_implied_by_recurse(citem, predicate,
+ weak))
+ {
+ result = true;
+ break;
+ }
+ }
+ iterate_end(clause_info);
+ return result;
+ }
+ break;
+
+ case CLASS_OR:
+ switch (pclass)
+ {
+ case CLASS_OR:
+
+ /*
+ * OR-clause => OR-clause if each of A's items implies any
+ * of B's items. Messy but can't do it any more simply.
+ */
+ result = true;
+ iterate_begin(citem, clause, clause_info)
+ {
+ bool presult = false;
+
+ iterate_begin(pitem, predicate, pred_info)
+ {
+ if (predicate_implied_by_recurse(citem, pitem,
+ weak))
+ {
+ presult = true;
+ break;
+ }
+ }
+ iterate_end(pred_info);
+ if (!presult)
+ {
+ result = false; /* doesn't imply any of B's */
+ break;
+ }
+ }
+ iterate_end(clause_info);
+ return result;
+
+ case CLASS_AND:
+ case CLASS_ATOM:
+
+ /*
+ * OR-clause => AND-clause if each of A's items implies B
+ *
+ * OR-clause => atom if each of A's items implies B
+ */
+ result = true;
+ iterate_begin(citem, clause, clause_info)
+ {
+ if (!predicate_implied_by_recurse(citem, predicate,
+ weak))
+ {
+ result = false;
+ break;
+ }
+ }
+ iterate_end(clause_info);
+ return result;
+ }
+ break;
+
+ case CLASS_ATOM:
+ switch (pclass)
+ {
+ case CLASS_AND:
+
+ /*
+ * atom => AND-clause if A implies each of B's items
+ */
+ result = true;
+ iterate_begin(pitem, predicate, pred_info)
+ {
+ if (!predicate_implied_by_recurse(clause, pitem,
+ weak))
+ {
+ result = false;
+ break;
+ }
+ }
+ iterate_end(pred_info);
+ return result;
+
+ case CLASS_OR:
+
+ /*
+ * atom => OR-clause if A implies any of B's items
+ */
+ result = false;
+ iterate_begin(pitem, predicate, pred_info)
+ {
+ if (predicate_implied_by_recurse(clause, pitem,
+ weak))
+ {
+ result = true;
+ break;
+ }
+ }
+ iterate_end(pred_info);
+ return result;
+
+ case CLASS_ATOM:
+
+ /*
+ * atom => atom is the base case
+ */
+ return
+ predicate_implied_by_simple_clause((Expr *) predicate,
+ clause,
+ weak);
+ }
+ break;
+ }
+
+ /* can't get here */
+ elog(ERROR, "predicate_classify returned a bogus value");
+ return false;
+}
+
+/*----------
+ * predicate_refuted_by_recurse
+ * Does the predicate refutation test for non-NULL restriction and
+ * predicate clauses.
+ *
+ * The logic followed here is ("R=>" means "refutes"):
+ * atom A R=> atom B iff: predicate_refuted_by_simple_clause says so
+ * atom A R=> AND-expr B iff: A R=> any of B's components
+ * atom A R=> OR-expr B iff: A R=> each of B's components
+ * AND-expr A R=> atom B iff: any of A's components R=> B
+ * AND-expr A R=> AND-expr B iff: A R=> any of B's components,
+ * *or* any of A's components R=> B
+ * AND-expr A R=> OR-expr B iff: A R=> each of B's components
+ * OR-expr A R=> atom B iff: each of A's components R=> B
+ * OR-expr A R=> AND-expr B iff: each of A's components R=> any of B's
+ * OR-expr A R=> OR-expr B iff: A R=> each of B's components
+ *
+ * All of the above rules apply equally to strong or weak refutation.
+ *
+ * In addition, if the predicate is a NOT-clause then we can use
+ * A R=> NOT B if: A => B
+ * This works for several different SQL constructs that assert the non-truth
+ * of their argument, ie NOT, IS FALSE, IS NOT TRUE, IS UNKNOWN, although some
+ * of them require that we prove strong implication. Likewise, we can use
+ * NOT A R=> B if: B => A
+ * but here we must be careful about strong vs. weak refutation and make
+ * the appropriate type of implication proof (weak or strong respectively).
+ *
+ * Other comments are as for predicate_implied_by_recurse().
+ *----------
+ */
+static bool
+predicate_refuted_by_recurse(Node *clause, Node *predicate,
+ bool weak)
+{
+ PredIterInfoData clause_info;
+ PredIterInfoData pred_info;
+ PredClass pclass;
+ Node *not_arg;
+ bool result;
+
+ /* skip through RestrictInfo */
+ Assert(clause != NULL);
+ if (IsA(clause, RestrictInfo))
+ clause = (Node *) ((RestrictInfo *) clause)->clause;
+
+ pclass = predicate_classify(predicate, &pred_info);
+
+ switch (predicate_classify(clause, &clause_info))
+ {
+ case CLASS_AND:
+ switch (pclass)
+ {
+ case CLASS_AND:
+
+ /*
+ * AND-clause R=> AND-clause if A refutes any of B's items
+ *
+ * Needed to handle (x AND y) R=> ((!x OR !y) AND z)
+ */
+ result = false;
+ iterate_begin(pitem, predicate, pred_info)
+ {
+ if (predicate_refuted_by_recurse(clause, pitem,
+ weak))
+ {
+ result = true;
+ break;
+ }
+ }
+ iterate_end(pred_info);
+ if (result)
+ return result;
+
+ /*
+ * Also check if any of A's items refutes B
+ *
+ * Needed to handle ((x OR y) AND z) R=> (!x AND !y)
+ */
+ iterate_begin(citem, clause, clause_info)
+ {
+ if (predicate_refuted_by_recurse(citem, predicate,
+ weak))
+ {
+ result = true;
+ break;
+ }
+ }
+ iterate_end(clause_info);
+ return result;
+
+ case CLASS_OR:
+
+ /*
+ * AND-clause R=> OR-clause if A refutes each of B's items
+ */
+ result = true;
+ iterate_begin(pitem, predicate, pred_info)
+ {
+ if (!predicate_refuted_by_recurse(clause, pitem,
+ weak))
+ {
+ result = false;
+ break;
+ }
+ }
+ iterate_end(pred_info);
+ return result;
+
+ case CLASS_ATOM:
+
+ /*
+ * If B is a NOT-type clause, A R=> B if A => B's arg
+ *
+ * Since, for either type of refutation, we are starting
+ * with the premise that A is true, we can use a strong
+ * implication test in all cases. That proves B's arg is
+ * true, which is more than we need for weak refutation if
+ * B is a simple NOT, but it allows not worrying about
+ * exactly which kind of negation clause we have.
+ */
+ not_arg = extract_not_arg(predicate);
+ if (not_arg &&
+ predicate_implied_by_recurse(clause, not_arg,
+ false))
+ return true;
+
+ /*
+ * AND-clause R=> atom if any of A's items refutes B
+ */
+ result = false;
+ iterate_begin(citem, clause, clause_info)
+ {
+ if (predicate_refuted_by_recurse(citem, predicate,
+ weak))
+ {
+ result = true;
+ break;
+ }
+ }
+ iterate_end(clause_info);
+ return result;
+ }
+ break;
+
+ case CLASS_OR:
+ switch (pclass)
+ {
+ case CLASS_OR:
+
+ /*
+ * OR-clause R=> OR-clause if A refutes each of B's items
+ */
+ result = true;
+ iterate_begin(pitem, predicate, pred_info)
+ {
+ if (!predicate_refuted_by_recurse(clause, pitem,
+ weak))
+ {
+ result = false;
+ break;
+ }
+ }
+ iterate_end(pred_info);
+ return result;
+
+ case CLASS_AND:
+
+ /*
+ * OR-clause R=> AND-clause if each of A's items refutes
+ * any of B's items.
+ */
+ result = true;
+ iterate_begin(citem, clause, clause_info)
+ {
+ bool presult = false;
+
+ iterate_begin(pitem, predicate, pred_info)
+ {
+ if (predicate_refuted_by_recurse(citem, pitem,
+ weak))
+ {
+ presult = true;
+ break;
+ }
+ }
+ iterate_end(pred_info);
+ if (!presult)
+ {
+ result = false; /* citem refutes nothing */
+ break;
+ }
+ }
+ iterate_end(clause_info);
+ return result;
+
+ case CLASS_ATOM:
+
+ /*
+ * If B is a NOT-type clause, A R=> B if A => B's arg
+ *
+ * Same logic as for the AND-clause case above.
+ */
+ not_arg = extract_not_arg(predicate);
+ if (not_arg &&
+ predicate_implied_by_recurse(clause, not_arg,
+ false))
+ return true;
+
+ /*
+ * OR-clause R=> atom if each of A's items refutes B
+ */
+ result = true;
+ iterate_begin(citem, clause, clause_info)
+ {
+ if (!predicate_refuted_by_recurse(citem, predicate,
+ weak))
+ {
+ result = false;
+ break;
+ }
+ }
+ iterate_end(clause_info);
+ return result;
+ }
+ break;
+
+ case CLASS_ATOM:
+
+ /*
+ * If A is a strong NOT-clause, A R=> B if B => A's arg
+ *
+ * Since A is strong, we may assume A's arg is false (not just
+ * not-true). If B weakly implies A's arg, then B can be neither
+ * true nor null, so that strong refutation is proven. If B
+ * strongly implies A's arg, then B cannot be true, so that weak
+ * refutation is proven.
+ */
+ not_arg = extract_strong_not_arg(clause);
+ if (not_arg &&
+ predicate_implied_by_recurse(predicate, not_arg,
+ !weak))
+ return true;
+
+ switch (pclass)
+ {
+ case CLASS_AND:
+
+ /*
+ * atom R=> AND-clause if A refutes any of B's items
+ */
+ result = false;
+ iterate_begin(pitem, predicate, pred_info)
+ {
+ if (predicate_refuted_by_recurse(clause, pitem,
+ weak))
+ {
+ result = true;
+ break;
+ }
+ }
+ iterate_end(pred_info);
+ return result;
+
+ case CLASS_OR:
+
+ /*
+ * atom R=> OR-clause if A refutes each of B's items
+ */
+ result = true;
+ iterate_begin(pitem, predicate, pred_info)
+ {
+ if (!predicate_refuted_by_recurse(clause, pitem,
+ weak))
+ {
+ result = false;
+ break;
+ }
+ }
+ iterate_end(pred_info);
+ return result;
+
+ case CLASS_ATOM:
+
+ /*
+ * If B is a NOT-type clause, A R=> B if A => B's arg
+ *
+ * Same logic as for the AND-clause case above.
+ */
+ not_arg = extract_not_arg(predicate);
+ if (not_arg &&
+ predicate_implied_by_recurse(clause, not_arg,
+ false))
+ return true;
+
+ /*
+ * atom R=> atom is the base case
+ */
+ return
+ predicate_refuted_by_simple_clause((Expr *) predicate,
+ clause,
+ weak);
+ }
+ break;
+ }
+
+ /* can't get here */
+ elog(ERROR, "predicate_classify returned a bogus value");
+ return false;
+}
+
+
+/*
+ * predicate_classify
+ * Classify an expression node as AND-type, OR-type, or neither (an atom).
+ *
+ * If the expression is classified as AND- or OR-type, then *info is filled
+ * in with the functions needed to iterate over its components.
+ *
+ * This function also implements enforcement of MAX_SAOP_ARRAY_SIZE: if a
+ * ScalarArrayOpExpr's array has too many elements, we just classify it as an
+ * atom. (This will result in its being passed as-is to the simple_clause
+ * functions, many of which will fail to prove anything about it.) Note that we
+ * cannot just stop after considering MAX_SAOP_ARRAY_SIZE elements; in general
+ * that would result in wrong proofs, rather than failing to prove anything.
+ */
+static PredClass
+predicate_classify(Node *clause, PredIterInfo info)
+{
+ /* Caller should not pass us NULL, nor a RestrictInfo clause */
+ Assert(clause != NULL);
+ Assert(!IsA(clause, RestrictInfo));
+
+ /*
+ * If we see a List, assume it's an implicit-AND list; this is the correct
+ * semantics for lists of RestrictInfo nodes.
+ */
+ if (IsA(clause, List))
+ {
+ info->startup_fn = list_startup_fn;
+ info->next_fn = list_next_fn;
+ info->cleanup_fn = list_cleanup_fn;
+ return CLASS_AND;
+ }
+
+ /* Handle normal AND and OR boolean clauses */
+ if (is_andclause(clause))
+ {
+ info->startup_fn = boolexpr_startup_fn;
+ info->next_fn = list_next_fn;
+ info->cleanup_fn = list_cleanup_fn;
+ return CLASS_AND;
+ }
+ if (is_orclause(clause))
+ {
+ info->startup_fn = boolexpr_startup_fn;
+ info->next_fn = list_next_fn;
+ info->cleanup_fn = list_cleanup_fn;
+ return CLASS_OR;
+ }
+
+ /* Handle ScalarArrayOpExpr */
+ if (IsA(clause, ScalarArrayOpExpr))
+ {
+ ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) clause;
+ Node *arraynode = (Node *) lsecond(saop->args);
+
+ /*
+ * We can break this down into an AND or OR structure, but only if we
+ * know how to iterate through expressions for the array's elements.
+ * We can do that if the array operand is a non-null constant or a
+ * simple ArrayExpr.
+ */
+ if (arraynode && IsA(arraynode, Const) &&
+ !((Const *) arraynode)->constisnull)
+ {
+ ArrayType *arrayval;
+ int nelems;
+
+ arrayval = DatumGetArrayTypeP(((Const *) arraynode)->constvalue);
+ nelems = ArrayGetNItems(ARR_NDIM(arrayval), ARR_DIMS(arrayval));
+ if (nelems <= MAX_SAOP_ARRAY_SIZE)
+ {
+ info->startup_fn = arrayconst_startup_fn;
+ info->next_fn = arrayconst_next_fn;
+ info->cleanup_fn = arrayconst_cleanup_fn;
+ return saop->useOr ? CLASS_OR : CLASS_AND;
+ }
+ }
+ else if (arraynode && IsA(arraynode, ArrayExpr) &&
+ !((ArrayExpr *) arraynode)->multidims &&
+ list_length(((ArrayExpr *) arraynode)->elements) <= MAX_SAOP_ARRAY_SIZE)
+ {
+ info->startup_fn = arrayexpr_startup_fn;
+ info->next_fn = arrayexpr_next_fn;
+ info->cleanup_fn = arrayexpr_cleanup_fn;
+ return saop->useOr ? CLASS_OR : CLASS_AND;
+ }
+ }
+
+ /* None of the above, so it's an atom */
+ return CLASS_ATOM;
+}
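+
+/*
+ * For example, "x = ANY (ARRAY[1, 2, 3])" is classified as CLASS_OR and
+ * iterates as the OpExprs "x = 1", "x = 2", "x = 3", while
+ * "x <> ALL (ARRAY[1, 2, 3])" is classified as CLASS_AND over "x <> 1",
+ * "x <> 2", "x <> 3".  ("x" is a hypothetical column.)
+ */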
+
+/*
+ * PredIterInfo routines for iterating over regular Lists. The iteration
+ * state variable is the next ListCell to visit.
+ */
+static void
+list_startup_fn(Node *clause, PredIterInfo info)
+{
+ info->state_list = (List *) clause;
+ info->state = (void *) list_head(info->state_list);
+}
+
+static Node *
+list_next_fn(PredIterInfo info)
+{
+ ListCell *l = (ListCell *) info->state;
+ Node *n;
+
+ if (l == NULL)
+ return NULL;
+ n = lfirst(l);
+ info->state = (void *) lnext(info->state_list, l);
+ return n;
+}
+
+static void
+list_cleanup_fn(PredIterInfo info)
+{
+ /* Nothing to clean up */
+}
+
+/*
+ * BoolExpr needs its own startup function, but can use list_next_fn and
+ * list_cleanup_fn.
+ */
+static void
+boolexpr_startup_fn(Node *clause, PredIterInfo info)
+{
+ info->state_list = ((BoolExpr *) clause)->args;
+ info->state = (void *) list_head(info->state_list);
+}
+
+/*
+ * PredIterInfo routines for iterating over a ScalarArrayOpExpr with a
+ * constant array operand.
+ */
+typedef struct
+{
+ OpExpr opexpr;
+ Const constexpr;
+ int next_elem;
+ int num_elems;
+ Datum *elem_values;
+ bool *elem_nulls;
+} ArrayConstIterState;
+
+static void
+arrayconst_startup_fn(Node *clause, PredIterInfo info)
+{
+ ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) clause;
+ ArrayConstIterState *state;
+ Const *arrayconst;
+ ArrayType *arrayval;
+ int16 elmlen;
+ bool elmbyval;
+ char elmalign;
+
+ /* Create working state struct */
+ state = (ArrayConstIterState *) palloc(sizeof(ArrayConstIterState));
+ info->state = (void *) state;
+
+ /* Deconstruct the array literal */
+ arrayconst = (Const *) lsecond(saop->args);
+ arrayval = DatumGetArrayTypeP(arrayconst->constvalue);
+ get_typlenbyvalalign(ARR_ELEMTYPE(arrayval),
+ &elmlen, &elmbyval, &elmalign);
+ deconstruct_array(arrayval,
+ ARR_ELEMTYPE(arrayval),
+ elmlen, elmbyval, elmalign,
+ &state->elem_values, &state->elem_nulls,
+ &state->num_elems);
+
+ /* Set up a dummy OpExpr to return as the per-item node */
+ state->opexpr.xpr.type = T_OpExpr;
+ state->opexpr.opno = saop->opno;
+ state->opexpr.opfuncid = saop->opfuncid;
+ state->opexpr.opresulttype = BOOLOID;
+ state->opexpr.opretset = false;
+ state->opexpr.opcollid = InvalidOid;
+ state->opexpr.inputcollid = saop->inputcollid;
+ state->opexpr.args = list_copy(saop->args);
+
+ /* Set up a dummy Const node to hold the per-element values */
+ state->constexpr.xpr.type = T_Const;
+ state->constexpr.consttype = ARR_ELEMTYPE(arrayval);
+ state->constexpr.consttypmod = -1;
+ state->constexpr.constcollid = arrayconst->constcollid;
+ state->constexpr.constlen = elmlen;
+ state->constexpr.constbyval = elmbyval;
+ lsecond(state->opexpr.args) = &state->constexpr;
+
+ /* Initialize iteration state */
+ state->next_elem = 0;
+}
+
+static Node *
+arrayconst_next_fn(PredIterInfo info)
+{
+ ArrayConstIterState *state = (ArrayConstIterState *) info->state;
+
+ if (state->next_elem >= state->num_elems)
+ return NULL;
+ state->constexpr.constvalue = state->elem_values[state->next_elem];
+ state->constexpr.constisnull = state->elem_nulls[state->next_elem];
+ state->next_elem++;
+ return (Node *) &(state->opexpr);
+}
+
+static void
+arrayconst_cleanup_fn(PredIterInfo info)
+{
+ ArrayConstIterState *state = (ArrayConstIterState *) info->state;
+
+ pfree(state->elem_values);
+ pfree(state->elem_nulls);
+ list_free(state->opexpr.args);
+ pfree(state);
+}
+
+/*
+ * PredIterInfo routines for iterating over a ScalarArrayOpExpr with a
+ * one-dimensional ArrayExpr array operand.
+ */
+typedef struct
+{
+ OpExpr opexpr;
+ ListCell *next;
+} ArrayExprIterState;
+
+static void
+arrayexpr_startup_fn(Node *clause, PredIterInfo info)
+{
+ ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) clause;
+ ArrayExprIterState *state;
+ ArrayExpr *arrayexpr;
+
+ /* Create working state struct */
+ state = (ArrayExprIterState *) palloc(sizeof(ArrayExprIterState));
+ info->state = (void *) state;
+
+ /* Set up a dummy OpExpr to return as the per-item node */
+ state->opexpr.xpr.type = T_OpExpr;
+ state->opexpr.opno = saop->opno;
+ state->opexpr.opfuncid = saop->opfuncid;
+ state->opexpr.opresulttype = BOOLOID;
+ state->opexpr.opretset = false;
+ state->opexpr.opcollid = InvalidOid;
+ state->opexpr.inputcollid = saop->inputcollid;
+ state->opexpr.args = list_copy(saop->args);
+
+ /* Initialize iteration variable to first member of ArrayExpr */
+ arrayexpr = (ArrayExpr *) lsecond(saop->args);
+ info->state_list = arrayexpr->elements;
+ state->next = list_head(arrayexpr->elements);
+}
+
+static Node *
+arrayexpr_next_fn(PredIterInfo info)
+{
+ ArrayExprIterState *state = (ArrayExprIterState *) info->state;
+
+ if (state->next == NULL)
+ return NULL;
+ lsecond(state->opexpr.args) = lfirst(state->next);
+ state->next = lnext(info->state_list, state->next);
+ return (Node *) &(state->opexpr);
+}
+
+static void
+arrayexpr_cleanup_fn(PredIterInfo info)
+{
+ ArrayExprIterState *state = (ArrayExprIterState *) info->state;
+
+ list_free(state->opexpr.args);
+ pfree(state);
+}
+
+
+/*----------
+ * predicate_implied_by_simple_clause
+ * Does the predicate implication test for a "simple clause" predicate
+ * and a "simple clause" restriction.
+ *
+ * We return true if able to prove the implication, false if not.
+ *
+ * We have three strategies for determining whether one simple clause
+ * implies another:
+ *
+ * A simple and general way is to see if they are equal(); this works for any
+ * kind of expression, and for either implication definition. (Actually,
+ * there is an implied assumption that the functions in the expression are
+ * immutable --- but this was checked for the predicate by the caller.)
+ *
+ * If the predicate is of the form "foo IS NOT NULL", and we are considering
+ * strong implication, we can conclude that the predicate is implied if the
+ * clause is strict for "foo", i.e., it must yield false or NULL when "foo"
+ * is NULL. In that case truth of the clause ensures that "foo" isn't NULL.
+ * (Again, this is a safe conclusion because "foo" must be immutable.)
+ * This doesn't work for weak implication, though.
+ *
+ * Finally, if both clauses are binary operator expressions, we may be able
+ * to prove something using the system's knowledge about operators; those
+ * proof rules are encapsulated in operator_predicate_proof().
+ *----------
+ */
+static bool
+predicate_implied_by_simple_clause(Expr *predicate, Node *clause,
+ bool weak)
+{
+ /* Allow interrupting long proof attempts */
+ CHECK_FOR_INTERRUPTS();
+
+ /* First try the equal() test */
+ if (equal((Node *) predicate, clause))
+ return true;
+
+ /* Next try the IS NOT NULL case */
+ if (!weak &&
+ predicate && IsA(predicate, NullTest))
+ {
+ NullTest *ntest = (NullTest *) predicate;
+
+ /* row IS NOT NULL does not act in the simple way we have in mind */
+ if (ntest->nulltesttype == IS_NOT_NULL &&
+ !ntest->argisrow)
+ {
+ /* strictness of clause for foo implies foo IS NOT NULL */
+ if (clause_is_strict_for(clause, (Node *) ntest->arg, true))
+ return true;
+ }
+ return false; /* we can't succeed below... */
+ }
+
+ /* Else try operator-related knowledge */
+ return operator_predicate_proof(predicate, clause, false, weak);
+}
+
+/*----------
+ * predicate_refuted_by_simple_clause
+ * Does the predicate refutation test for a "simple clause" predicate
+ * and a "simple clause" restriction.
+ *
+ * We return true if able to prove the refutation, false if not.
+ *
+ * Unlike the implication case, checking for equal() clauses isn't helpful.
+ * But relation_excluded_by_constraints() checks for self-contradictions in a
+ * list of clauses, so that we may get here with predicate and clause being
+ * actually pointer-equal, and that is worth eliminating quickly.
+ *
+ * When the predicate is of the form "foo IS NULL", we can conclude that
+ * the predicate is refuted if the clause is strict for "foo" (see notes for
+ * implication case), or is "foo IS NOT NULL". That works for either strong
+ * or weak refutation.
+ *
+ * A clause "foo IS NULL" refutes a predicate "foo IS NOT NULL" in all cases.
+ * If we are considering weak refutation, it also refutes a predicate that
+ * is strict for "foo", since then the predicate must yield false or NULL
+ * (and since "foo" appears in the predicate, it's known immutable).
+ *
+ * (The main motivation for covering these IS [NOT] NULL cases is to support
+ * using IS NULL/IS NOT NULL as partition-defining constraints.)
+ *
+ * Finally, if both clauses are binary operator expressions, we may be able
+ * to prove something using the system's knowledge about operators; those
+ * proof rules are encapsulated in operator_predicate_proof().
+ *----------
+ */
+static bool
+predicate_refuted_by_simple_clause(Expr *predicate, Node *clause,
+ bool weak)
+{
+ /* Allow interrupting long proof attempts */
+ CHECK_FOR_INTERRUPTS();
+
+ /* A simple clause can't refute itself */
+ /* Worth checking because of relation_excluded_by_constraints() */
+ if ((Node *) predicate == clause)
+ return false;
+
+ /* Try the predicate-IS-NULL case */
+ if (predicate && IsA(predicate, NullTest) &&
+ ((NullTest *) predicate)->nulltesttype == IS_NULL)
+ {
+ Expr *isnullarg = ((NullTest *) predicate)->arg;
+
+ /* row IS NULL does not act in the simple way we have in mind */
+ if (((NullTest *) predicate)->argisrow)
+ return false;
+
+ /* strictness of clause for foo refutes foo IS NULL */
+ if (clause_is_strict_for(clause, (Node *) isnullarg, true))
+ return true;
+
+ /* foo IS NOT NULL refutes foo IS NULL */
+ if (clause && IsA(clause, NullTest) &&
+ ((NullTest *) clause)->nulltesttype == IS_NOT_NULL &&
+ !((NullTest *) clause)->argisrow &&
+ equal(((NullTest *) clause)->arg, isnullarg))
+ return true;
+
+ return false; /* we can't succeed below... */
+ }
+
+ /* Try the clause-IS-NULL case */
+ if (clause && IsA(clause, NullTest) &&
+ ((NullTest *) clause)->nulltesttype == IS_NULL)
+ {
+ Expr *isnullarg = ((NullTest *) clause)->arg;
+
+ /* row IS NULL does not act in the simple way we have in mind */
+ if (((NullTest *) clause)->argisrow)
+ return false;
+
+ /* foo IS NULL refutes foo IS NOT NULL */
+ if (predicate && IsA(predicate, NullTest) &&
+ ((NullTest *) predicate)->nulltesttype == IS_NOT_NULL &&
+ !((NullTest *) predicate)->argisrow &&
+ equal(((NullTest *) predicate)->arg, isnullarg))
+ return true;
+
+ /* foo IS NULL weakly refutes any predicate that is strict for foo */
+ if (weak &&
+ clause_is_strict_for((Node *) predicate, (Node *) isnullarg, true))
+ return true;
+
+ return false; /* we can't succeed below... */
+ }
+
+ /* Else try operator-related knowledge */
+ return operator_predicate_proof(predicate, clause, true, weak);
+}
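+
+/*
+ * For example, the clause "x = 42" (a strict operator) refutes the predicate
+ * "x IS NULL", and the clause "x IS NULL" refutes the predicate
+ * "x IS NOT NULL"; both proofs come from the IS [NOT] NULL rules above
+ * rather than from operator knowledge.  ("x" is a hypothetical column.)
+ */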
+
+
+/*
+ * If clause asserts the non-truth of a subclause, return that subclause;
+ * otherwise return NULL.
+ */
+static Node *
+extract_not_arg(Node *clause)
+{
+ if (clause == NULL)
+ return NULL;
+ if (IsA(clause, BoolExpr))
+ {
+ BoolExpr *bexpr = (BoolExpr *) clause;
+
+ if (bexpr->boolop == NOT_EXPR)
+ return (Node *) linitial(bexpr->args);
+ }
+ else if (IsA(clause, BooleanTest))
+ {
+ BooleanTest *btest = (BooleanTest *) clause;
+
+ if (btest->booltesttype == IS_NOT_TRUE ||
+ btest->booltesttype == IS_FALSE ||
+ btest->booltesttype == IS_UNKNOWN)
+ return (Node *) btest->arg;
+ }
+ return NULL;
+}
+
+/*
+ * If clause asserts the falsity of a subclause, return that subclause;
+ * otherwise return NULL.
+ */
+static Node *
+extract_strong_not_arg(Node *clause)
+{
+ if (clause == NULL)
+ return NULL;
+ if (IsA(clause, BoolExpr))
+ {
+ BoolExpr *bexpr = (BoolExpr *) clause;
+
+ if (bexpr->boolop == NOT_EXPR)
+ return (Node *) linitial(bexpr->args);
+ }
+ else if (IsA(clause, BooleanTest))
+ {
+ BooleanTest *btest = (BooleanTest *) clause;
+
+ if (btest->booltesttype == IS_FALSE)
+ return (Node *) btest->arg;
+ }
+ return NULL;
+}
+
+
+/*
+ * Can we prove that "clause" returns NULL (or FALSE) if "subexpr" is
+ * assumed to yield NULL?
+ *
+ * In most places in the planner, "strictness" refers to a guarantee that
+ * an expression yields NULL output for a NULL input, and that's mostly what
+ * we're looking for here. However, at top level where the clause is known
+ * to yield boolean, it may be sufficient to prove that it cannot return TRUE
+ * when "subexpr" is NULL. The caller should pass allow_false = true when
+ * this weaker property is acceptable. (When this function recurses
+ * internally, we pass down allow_false = false since we need to prove actual
+ * nullness of the subexpression.)
+ *
+ * We assume that the caller checked that at least one of the input
+ * expressions is immutable.  All of the proof rules here involve matching
+ * "subexpr" to some portion of "clause", so this lets us assume that
+ * "subexpr" is immutable without a separate check.
+ *
+ * The base case is that clause and subexpr are equal().
+ *
+ * We can also report success if the subexpr appears as a subexpression
+ * of "clause" in a place where it'd force nullness of the overall result.
+ */
+static bool
+clause_is_strict_for(Node *clause, Node *subexpr, bool allow_false)
+{
+ ListCell *lc;
+
+ /* safety checks */
+ if (clause == NULL || subexpr == NULL)
+ return false;
+
+ /*
+ * Look through any RelabelType nodes, so that we can match, say,
+ * varcharcol with lower(varcharcol::text). (In general we could recurse
+ * through any nullness-preserving, immutable operation.) We should not
+ * see stacked RelabelTypes here.
+ */
+ if (IsA(clause, RelabelType))
+ clause = (Node *) ((RelabelType *) clause)->arg;
+ if (IsA(subexpr, RelabelType))
+ subexpr = (Node *) ((RelabelType *) subexpr)->arg;
+
+ /* Base case */
+ if (equal(clause, subexpr))
+ return true;
+
+ /*
+ * If we have a strict operator or function, a NULL result is guaranteed
+ * if any input is forced NULL by subexpr. This is OK even if the op or
+ * func isn't immutable, since it won't even be called on NULL input.
+ */
+ if (is_opclause(clause) &&
+ op_strict(((OpExpr *) clause)->opno))
+ {
+ foreach(lc, ((OpExpr *) clause)->args)
+ {
+ if (clause_is_strict_for((Node *) lfirst(lc), subexpr, false))
+ return true;
+ }
+ return false;
+ }
+ if (is_funcclause(clause) &&
+ func_strict(((FuncExpr *) clause)->funcid))
+ {
+ foreach(lc, ((FuncExpr *) clause)->args)
+ {
+ if (clause_is_strict_for((Node *) lfirst(lc), subexpr, false))
+ return true;
+ }
+ return false;
+ }
+
+ /*
+ * CoerceViaIO is strict (whether or not the I/O functions it calls are).
+ * Likewise, ArrayCoerceExpr is strict for its array argument (regardless
+ * of what the per-element expression is), ConvertRowtypeExpr is strict at
+ * the row level, and CoerceToDomain is strict too. These are worth
+ * checking mainly because it saves us having to explain to users why some
+ * type coercions are known strict and others aren't.
+ */
+ if (IsA(clause, CoerceViaIO))
+ return clause_is_strict_for((Node *) ((CoerceViaIO *) clause)->arg,
+ subexpr, false);
+ if (IsA(clause, ArrayCoerceExpr))
+ return clause_is_strict_for((Node *) ((ArrayCoerceExpr *) clause)->arg,
+ subexpr, false);
+ if (IsA(clause, ConvertRowtypeExpr))
+ return clause_is_strict_for((Node *) ((ConvertRowtypeExpr *) clause)->arg,
+ subexpr, false);
+ if (IsA(clause, CoerceToDomain))
+ return clause_is_strict_for((Node *) ((CoerceToDomain *) clause)->arg,
+ subexpr, false);
+
+ /*
+ * ScalarArrayOpExpr is a special case. Note that we'd only reach here
+ * with a ScalarArrayOpExpr clause if we failed to deconstruct it into an
+ * AND or OR tree, as for example if it has too many array elements.
+ */
+ if (IsA(clause, ScalarArrayOpExpr))
+ {
+ ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) clause;
+ Node *scalarnode = (Node *) linitial(saop->args);
+ Node *arraynode = (Node *) lsecond(saop->args);
+
+ /*
+ * If we can prove the scalar input to be null, and the operator is
+ * strict, then the SAOP result has to be null --- unless the array is
+ * empty. For an empty array, we'd get either false (for ANY) or true
+ * (for ALL). So if allow_false = true then the proof succeeds anyway
+ * for the ANY case; otherwise we can only make the proof if we can
+ * prove the array non-empty.
+ */
+ if (clause_is_strict_for(scalarnode, subexpr, false) &&
+ op_strict(saop->opno))
+ {
+ int nelems = 0;
+
+ if (allow_false && saop->useOr)
+ return true; /* can succeed even if array is empty */
+
+ if (arraynode && IsA(arraynode, Const))
+ {
+ Const *arrayconst = (Const *) arraynode;
+ ArrayType *arrval;
+
+ /*
+ * If array is constant NULL then we can succeed, as in the
+ * case below.
+ */
+ if (arrayconst->constisnull)
+ return true;
+
+ /* Otherwise, we can compute the number of elements. */
+ arrval = DatumGetArrayTypeP(arrayconst->constvalue);
+ nelems = ArrayGetNItems(ARR_NDIM(arrval), ARR_DIMS(arrval));
+ }
+ else if (arraynode && IsA(arraynode, ArrayExpr) &&
+ !((ArrayExpr *) arraynode)->multidims)
+ {
+ /*
+ * We can also reliably count the number of array elements if
+ * the input is a non-multidim ARRAY[] expression.
+ */
+ nelems = list_length(((ArrayExpr *) arraynode)->elements);
+ }
+
+ /* Proof succeeds if array is definitely non-empty */
+ if (nelems > 0)
+ return true;
+ }
+
+ /*
+ * If we can prove the array input to be null, the proof succeeds in
+ * all cases, since ScalarArrayOpExpr will always return NULL for a
+ * NULL array. Otherwise, we're done here.
+ */
+ return clause_is_strict_for(arraynode, subexpr, false);
+ }
+
+ /*
+ * When recursing into an expression, we might find a NULL constant.
+ * That's certainly NULL, whether it matches subexpr or not.
+ */
+ if (IsA(clause, Const))
+ return ((Const *) clause)->constisnull;
+
+ return false;
+}
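+
+/*
+ * For example, the clause "lower(x) = 'foo'" is strict for "x" (both the
+ * text equality operator and lower() are strict), so a NULL "x" forces the
+ * whole clause to NULL; that is enough for the clause to prove
+ * "x IS NOT NULL" or to refute "x IS NULL".  ("x" is a hypothetical column.)
+ */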
+
+
+/*
+ * Define "operator implication tables" for btree operators ("strategies"),
+ * and similar tables for refutation.
+ *
+ * The strategy numbers defined by btree indexes (see access/stratnum.h) are:
+ * 1 < 2 <= 3 = 4 >= 5 >
+ * and in addition we use 6 to represent <>. <> is not a btree-indexable
+ * operator, but we assume here that if an equality operator of a btree
+ * opfamily has a negator operator, the negator behaves as <> for the opfamily.
+ * (This convention is also known to get_op_btree_interpretation().)
+ *
+ * BT_implies_table[] and BT_refutes_table[] are used for cases where we have
+ * two identical subexpressions and we want to know whether one operator
+ * expression implies or refutes the other. That is, if the "clause" is
+ * EXPR1 clause_op EXPR2 and the "predicate" is EXPR1 pred_op EXPR2 for the
+ * same two (immutable) subexpressions:
+ * BT_implies_table[clause_op-1][pred_op-1]
+ * is true if the clause implies the predicate
+ * BT_refutes_table[clause_op-1][pred_op-1]
+ * is true if the clause refutes the predicate
+ * where clause_op and pred_op are strategy numbers (from 1 to 6) in the
+ * same btree opfamily. For example, "x < y" implies "x <= y" and refutes
+ * "x > y".
+ *
+ * BT_implic_table[] and BT_refute_table[] are used where we have two
+ * constants that we need to compare. The interpretation of:
+ *
+ * test_op = BT_implic_table[clause_op-1][pred_op-1]
+ *
+ * where test_op, clause_op and pred_op are strategy numbers (from 1 to 6)
+ * of btree operators, is as follows:
+ *
+ * If you know, for some EXPR, that "EXPR clause_op CONST1" is true, and you
+ * want to determine whether "EXPR pred_op CONST2" must also be true, then
+ * you can use "CONST2 test_op CONST1" as a test. If this test returns true,
+ * then the predicate expression must be true; if the test returns false,
+ * then the predicate expression may be false.
+ *
+ * For example, if clause is "Quantity > 10" and pred is "Quantity > 5"
+ * then we test "5 <= 10" which evals to true, so clause implies pred.
+ *
+ * Similarly, the interpretation of a BT_refute_table entry is:
+ *
+ * If you know, for some EXPR, that "EXPR clause_op CONST1" is true, and you
+ * want to determine whether "EXPR pred_op CONST2" must be false, then
+ * you can use "CONST2 test_op CONST1" as a test. If this test returns true,
+ * then the predicate expression must be false; if the test returns false,
+ * then the predicate expression may be true.
+ *
+ * For example, if clause is "Quantity > 10" and pred is "Quantity < 5"
+ * then we test "5 <= 10" which evals to true, so clause refutes pred.
+ *
+ * An entry where test_op == 0 means the implication cannot be determined.
+ */
+
+#define BTLT BTLessStrategyNumber
+#define BTLE BTLessEqualStrategyNumber
+#define BTEQ BTEqualStrategyNumber
+#define BTGE BTGreaterEqualStrategyNumber
+#define BTGT BTGreaterStrategyNumber
+#define BTNE ROWCOMPARE_NE
+
+/* We use "none" for 0/false to make the tables align nicely */
+#define none 0
+
+static const bool BT_implies_table[6][6] = {
+/*
+ * The predicate operator:
+ * LT LE EQ GE GT NE
+ */
+ {true, true, none, none, none, true}, /* LT */
+ {none, true, none, none, none, none}, /* LE */
+ {none, true, true, true, none, none}, /* EQ */
+ {none, none, none, true, none, none}, /* GE */
+ {none, none, none, true, true, true}, /* GT */
+ {none, none, none, none, none, true} /* NE */
+};
+
+static const bool BT_refutes_table[6][6] = {
+/*
+ * The predicate operator:
+ * LT LE EQ GE GT NE
+ */
+ {none, none, true, true, true, none}, /* LT */
+ {none, none, none, none, true, none}, /* LE */
+ {true, none, none, none, true, true}, /* EQ */
+ {true, none, none, none, none, none}, /* GE */
+ {true, true, true, none, none, none}, /* GT */
+ {none, none, true, none, none, none} /* NE */
+};
+
+static const StrategyNumber BT_implic_table[6][6] = {
+/*
+ * The predicate operator:
+ * LT LE EQ GE GT NE
+ */
+ {BTGE, BTGE, none, none, none, BTGE}, /* LT */
+ {BTGT, BTGE, none, none, none, BTGT}, /* LE */
+ {BTGT, BTGE, BTEQ, BTLE, BTLT, BTNE}, /* EQ */
+ {none, none, none, BTLE, BTLT, BTLT}, /* GE */
+ {none, none, none, BTLE, BTLE, BTLE}, /* GT */
+ {none, none, none, none, none, BTEQ} /* NE */
+};
+
+static const StrategyNumber BT_refute_table[6][6] = {
+/*
+ * The predicate operator:
+ * LT LE EQ GE GT NE
+ */
+ {none, none, BTGE, BTGE, BTGE, none}, /* LT */
+ {none, none, BTGT, BTGT, BTGE, none}, /* LE */
+ {BTLE, BTLT, BTNE, BTGT, BTGE, BTEQ}, /* EQ */
+ {BTLE, BTLT, BTLT, none, none, none}, /* GE */
+ {BTLE, BTLE, BTLE, none, none, none}, /* GT */
+ {none, none, BTEQ, none, none, none} /* NE */
+};
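+
+/*
+ * A worked example of the constant-comparison tables, for a hypothetical
+ * column "x": given the clause "x < 10" and the predicate "x <= 15",
+ * BT_implic_table[BTLT - 1][BTLE - 1] is BTGE, so the test is "15 >= 10";
+ * that is true, hence the clause implies the predicate.  For the predicate
+ * "x > 15", BT_refute_table[BTLT - 1][BTGT - 1] is also BTGE, the same test
+ * succeeds, and so the clause refutes that predicate instead.
+ */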
+
+
+/*
+ * operator_predicate_proof
+ * Does the predicate implication or refutation test for a "simple clause"
+ * predicate and a "simple clause" restriction, when both are operator
+ * clauses using related operators and identical input expressions.
+ *
+ * When refute_it == false, we want to prove the predicate true;
+ * when refute_it == true, we want to prove the predicate false.
+ * (There is enough common code to justify handling these two cases
+ * in one routine.) We return true if able to make the proof, false
+ * if not able to prove it.
+ *
+ * We mostly need not distinguish strong vs. weak implication/refutation here.
+ * This depends on the assumption that a pair of related operators (i.e.,
+ * commutators, negators, or btree opfamily siblings) will not return one NULL
+ * and one non-NULL result for the same inputs. Then, for the proof types
+ * where we start with an assumption of truth of the clause, the predicate
+ * operator could not return NULL either, so it doesn't matter whether we are
+ * trying to make a strong or weak proof. For weak implication, it could be
+ * that the clause operator returned NULL, but then the predicate operator
+ * would as well, so that the weak implication still holds. This argument
+ * doesn't apply in the case where we are considering two different constant
+ * values, since then the operators aren't being given identical inputs. But
+ * we only support that for btree operators, for which we can assume that all
+ * non-null inputs result in non-null outputs, so that it doesn't matter which
+ * two non-null constants we consider. If either constant is NULL, we have
+ * to think harder, but sometimes the proof still works, as explained below.
+ *
+ * We can make proofs involving several expression forms (here "foo" and "bar"
+ * represent subexpressions that are identical according to equal()):
+ * "foo op1 bar" refutes "foo op2 bar" if op1 is op2's negator
+ * "foo op1 bar" implies "bar op2 foo" if op1 is op2's commutator
+ * "foo op1 bar" refutes "bar op2 foo" if op1 is negator of op2's commutator
+ * "foo op1 bar" can imply/refute "foo op2 bar" based on btree semantics
+ * "foo op1 bar" can imply/refute "bar op2 foo" based on btree semantics
+ * "foo op1 const1" can imply/refute "foo op2 const2" based on btree semantics
+ *
+ * For the last three cases, op1 and op2 have to be members of the same btree
+ * operator family. When both subexpressions are identical, the idea is that,
+ * for instance, x < y implies x <= y, independently of exactly what x and y
+ * are. If we have two different constants compared to the same expression
+ * foo, we have to execute a comparison between the two constant values
+ * in order to determine the result; for instance, foo < c1 implies foo < c2
+ * if c1 <= c2. We assume it's safe to compare the constants at plan time
+ * if the comparison operator is immutable.
+ *
+ * Note: all the operators and subexpressions have to be immutable for the
+ * proof to be safe. We assume the predicate expression is entirely immutable,
+ * so no explicit check on the subexpressions is needed here, but in some
+ * cases we need an extra check of operator immutability. In particular,
+ * btree opfamilies can contain cross-type operators that are merely stable,
+ * and we dare not make deductions with those.
+ */
+static bool
+operator_predicate_proof(Expr *predicate, Node *clause,
+ bool refute_it, bool weak)
+{
+ OpExpr *pred_opexpr,
+ *clause_opexpr;
+ Oid pred_collation,
+ clause_collation;
+ Oid pred_op,
+ clause_op,
+ test_op;
+ Node *pred_leftop,
+ *pred_rightop,
+ *clause_leftop,
+ *clause_rightop;
+ Const *pred_const,
+ *clause_const;
+ Expr *test_expr;
+ ExprState *test_exprstate;
+ Datum test_result;
+ bool isNull;
+ EState *estate;
+ MemoryContext oldcontext;
+
+ /*
+ * Both expressions must be binary opclauses, else we can't do anything.
+ *
+ * Note: in future we might extend this logic to other operator-based
+ * constructs such as DistinctExpr. But the planner isn't very smart
+ * about DistinctExpr in general, and this probably isn't the first place
+ * to fix if you want to improve that.
+ */
+ if (!is_opclause(predicate))
+ return false;
+ pred_opexpr = (OpExpr *) predicate;
+ if (list_length(pred_opexpr->args) != 2)
+ return false;
+ if (!is_opclause(clause))
+ return false;
+ clause_opexpr = (OpExpr *) clause;
+ if (list_length(clause_opexpr->args) != 2)
+ return false;
+
+ /*
+ * If they're marked with different collations then we can't do anything.
+ * This is a cheap test so let's get it out of the way early.
+ */
+ pred_collation = pred_opexpr->inputcollid;
+ clause_collation = clause_opexpr->inputcollid;
+ if (pred_collation != clause_collation)
+ return false;
+
+ /* Grab the operator OIDs now too. We may commute these below. */
+ pred_op = pred_opexpr->opno;
+ clause_op = clause_opexpr->opno;
+
+ /*
+ * We have to match up at least one pair of input expressions.
+ */
+ pred_leftop = (Node *) linitial(pred_opexpr->args);
+ pred_rightop = (Node *) lsecond(pred_opexpr->args);
+ clause_leftop = (Node *) linitial(clause_opexpr->args);
+ clause_rightop = (Node *) lsecond(clause_opexpr->args);
+
+ if (equal(pred_leftop, clause_leftop))
+ {
+ if (equal(pred_rightop, clause_rightop))
+ {
+ /* We have x op1 y and x op2 y */
+ return operator_same_subexprs_proof(pred_op, clause_op, refute_it);
+ }
+ else
+ {
+ /* Fail unless rightops are both Consts */
+ if (pred_rightop == NULL || !IsA(pred_rightop, Const))
+ return false;
+ pred_const = (Const *) pred_rightop;
+ if (clause_rightop == NULL || !IsA(clause_rightop, Const))
+ return false;
+ clause_const = (Const *) clause_rightop;
+ }
+ }
+ else if (equal(pred_rightop, clause_rightop))
+ {
+ /* Fail unless leftops are both Consts */
+ if (pred_leftop == NULL || !IsA(pred_leftop, Const))
+ return false;
+ pred_const = (Const *) pred_leftop;
+ if (clause_leftop == NULL || !IsA(clause_leftop, Const))
+ return false;
+ clause_const = (Const *) clause_leftop;
+ /* Commute both operators so we can assume Consts are on the right */
+ pred_op = get_commutator(pred_op);
+ if (!OidIsValid(pred_op))
+ return false;
+ clause_op = get_commutator(clause_op);
+ if (!OidIsValid(clause_op))
+ return false;
+ }
+ else if (equal(pred_leftop, clause_rightop))
+ {
+ if (equal(pred_rightop, clause_leftop))
+ {
+ /* We have x op1 y and y op2 x */
+ /* Commute pred_op so we can treat this like a straight match */
+ pred_op = get_commutator(pred_op);
+ if (!OidIsValid(pred_op))
+ return false;
+ return operator_same_subexprs_proof(pred_op, clause_op, refute_it);
+ }
+ else
+ {
+ /* Fail unless pred_rightop/clause_leftop are both Consts */
+ if (pred_rightop == NULL || !IsA(pred_rightop, Const))
+ return false;
+ pred_const = (Const *) pred_rightop;
+ if (clause_leftop == NULL || !IsA(clause_leftop, Const))
+ return false;
+ clause_const = (Const *) clause_leftop;
+ /* Commute clause_op so we can assume Consts are on the right */
+ clause_op = get_commutator(clause_op);
+ if (!OidIsValid(clause_op))
+ return false;
+ }
+ }
+ else if (equal(pred_rightop, clause_leftop))
+ {
+ /* Fail unless pred_leftop/clause_rightop are both Consts */
+ if (pred_leftop == NULL || !IsA(pred_leftop, Const))
+ return false;
+ pred_const = (Const *) pred_leftop;
+ if (clause_rightop == NULL || !IsA(clause_rightop, Const))
+ return false;
+ clause_const = (Const *) clause_rightop;
+ /* Commute pred_op so we can assume Consts are on the right */
+ pred_op = get_commutator(pred_op);
+ if (!OidIsValid(pred_op))
+ return false;
+ }
+ else
+ {
+ /* Failed to match up any of the subexpressions, so we lose */
+ return false;
+ }
+
+ /*
+ * We have two identical subexpressions, and two other subexpressions that
+ * are not identical but are both Consts; and we have commuted the
+ * operators if necessary so that the Consts are on the right. We'll need
+ * to compare the Consts' values. If either is NULL, we can't do that, so
+ * usually the proof fails ... but in some cases we can claim success.
+ */
+ if (clause_const->constisnull)
+ {
+ /* If clause_op isn't strict, we can't prove anything */
+ if (!op_strict(clause_op))
+ return false;
+
+ /*
+ * At this point we know that the clause returns NULL. For proof
+ * types that assume truth of the clause, this means the proof is
+ * vacuously true (a/k/a "false implies anything"). That's all proof
+ * types except weak implication.
+ */
+ if (!(weak && !refute_it))
+ return true;
+
+ /*
+ * For weak implication, it's still possible for the proof to succeed,
+ * if the predicate can also be proven NULL. In that case we've got
+ * NULL => NULL which is valid for this proof type.
+ */
+ if (pred_const->constisnull && op_strict(pred_op))
+ return true;
+ /* Else the proof fails */
+ return false;
+ }
+ if (pred_const->constisnull)
+ {
+ /*
+ * If the pred_op is strict, we know the predicate yields NULL, which
+ * means the proof succeeds for either weak implication or weak
+ * refutation.
+ */
+ if (weak && op_strict(pred_op))
+ return true;
+ /* Else the proof fails */
+ return false;
+ }
+
+ /*
+ * Look up the constant-comparison operator using the system catalogs and
+ * the operator implication tables.
+ */
+ test_op = get_btree_test_op(pred_op, clause_op, refute_it);
+
+ if (!OidIsValid(test_op))
+ {
+ /* couldn't find a suitable comparison operator */
+ return false;
+ }
+
+ /*
+ * Evaluate the test. For this we need an EState.
+ */
+ estate = CreateExecutorState();
+
+ /* We can use the estate's working context to avoid memory leaks. */
+ oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
+
+ /* Build expression tree */
+ test_expr = make_opclause(test_op,
+ BOOLOID,
+ false,
+ (Expr *) pred_const,
+ (Expr *) clause_const,
+ InvalidOid,
+ pred_collation);
+
+ /* Fill in opfuncids */
+ fix_opfuncids((Node *) test_expr);
+
+ /* Prepare it for execution */
+ test_exprstate = ExecInitExpr(test_expr, NULL);
+
+ /* And execute it. */
+ test_result = ExecEvalExprSwitchContext(test_exprstate,
+ GetPerTupleExprContext(estate),
+ &isNull);
+
+ /* Get back to outer memory context */
+ MemoryContextSwitchTo(oldcontext);
+
+ /* Release all the junk we just created */
+ FreeExecutorState(estate);
+
+ if (isNull)
+ {
+ /* Treat a null result as non-proof ... but it's a tad fishy ... */
+ elog(DEBUG2, "null predicate test result");
+ return false;
+ }
+ return DatumGetBool(test_result);
+}
+
+
+/*
+ * operator_same_subexprs_proof
+ * Assuming that EXPR1 clause_op EXPR2 is true, try to prove or refute
+ * EXPR1 pred_op EXPR2.
+ *
+ * Return true if able to make the proof, false if not able to prove it.
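+ *
+ * For example, given that EXPR1 < EXPR2 holds, we can prove EXPR1 <= EXPR2
+ * (implication) or refute EXPR1 >= EXPR2 (refutation, since < and >= are
+ * each other's negators in btree terms).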
+ */
+static bool
+operator_same_subexprs_proof(Oid pred_op, Oid clause_op, bool refute_it)
+{
+ /*
+ * A simple and general rule is that the predicate is proven if clause_op
+ * and pred_op are the same, or refuted if they are each other's negators.
+ * We need not check immutability since the pred_op is already known
+ * immutable. (Actually, by this point we may have the commutator of a
+ * known-immutable pred_op, but that should certainly be immutable too.
+ * Likewise we don't worry whether the pred_op's negator is immutable.)
+ *
+ * Note: the "same" case won't get here if we actually had EXPR1 clause_op
+ * EXPR2 and EXPR1 pred_op EXPR2, because the overall-expression-equality
+ * test in predicate_implied_by_simple_clause would have caught it. But
+ * we can see the same operator after having commuted the pred_op.
+ */
+ if (refute_it)
+ {
+ if (get_negator(pred_op) == clause_op)
+ return true;
+ }
+ else
+ {
+ if (pred_op == clause_op)
+ return true;
+ }
+
+ /*
+ * Otherwise, see if we can determine the implication by finding the
+ * operators' relationship via some btree opfamily.
+ */
+ return operator_same_subexprs_lookup(pred_op, clause_op, refute_it);
+}
+
+
+/*
+ * We use a lookaside table to cache the result of btree proof operator
+ * lookups, since the actual lookup is pretty expensive and doesn't change
+ * for any given pair of operators (at least as long as pg_amop doesn't
+ * change). A single hash entry stores both implication and refutation
+ * results for a given pair of operators; but note we may have determined
+ * only one of those sets of results as yet.
+ */
+typedef struct OprProofCacheKey
+{
+ Oid pred_op; /* predicate operator */
+ Oid clause_op; /* clause operator */
+} OprProofCacheKey;
+
+typedef struct OprProofCacheEntry
+{
+ /* the hash lookup key MUST BE FIRST */
+ OprProofCacheKey key;
+
+ bool have_implic; /* do we know the implication result? */
+ bool have_refute; /* do we know the refutation result? */
+ bool same_subexprs_implies; /* X clause_op Y implies X pred_op Y? */
+ bool same_subexprs_refutes; /* X clause_op Y refutes X pred_op Y? */
+ Oid implic_test_op; /* OID of the test operator, or 0 if none */
+ Oid refute_test_op; /* OID of the test operator, or 0 if none */
+} OprProofCacheEntry;
+
+static HTAB *OprProofCacheHash = NULL;
+
+
+/*
+ * lookup_proof_cache
+ * Get, and fill in if necessary, the appropriate cache entry.
+ */
+static OprProofCacheEntry *
+lookup_proof_cache(Oid pred_op, Oid clause_op, bool refute_it)
+{
+ OprProofCacheKey key;
+ OprProofCacheEntry *cache_entry;
+ bool cfound;
+ bool same_subexprs = false;
+ Oid test_op = InvalidOid;
+ bool found = false;
+ List *pred_op_infos,
+ *clause_op_infos;
+ ListCell *lcp,
+ *lcc;
+
+ /*
+ * Find or make a cache entry for this pair of operators.
+ */
+ if (OprProofCacheHash == NULL)
+ {
+ /* First time through: initialize the hash table */
+ HASHCTL ctl;
+
+ ctl.keysize = sizeof(OprProofCacheKey);
+ ctl.entrysize = sizeof(OprProofCacheEntry);
+ OprProofCacheHash = hash_create("Btree proof lookup cache", 256,
+ &ctl, HASH_ELEM | HASH_BLOBS);
+
+ /* Arrange to flush cache on pg_amop changes */
+ CacheRegisterSyscacheCallback(AMOPOPID,
+ InvalidateOprProofCacheCallBack,
+ (Datum) 0);
+ }
+
+ key.pred_op = pred_op;
+ key.clause_op = clause_op;
+ cache_entry = (OprProofCacheEntry *) hash_search(OprProofCacheHash,
+ (void *) &key,
+ HASH_ENTER, &cfound);
+ if (!cfound)
+ {
+ /* new cache entry, set it invalid */
+ cache_entry->have_implic = false;
+ cache_entry->have_refute = false;
+ }
+ else
+ {
+ /* pre-existing cache entry, see if we know the answer yet */
+ if (refute_it ? cache_entry->have_refute : cache_entry->have_implic)
+ return cache_entry;
+ }
+
+ /*
+ * Try to find a btree opfamily containing the given operators.
+ *
+ * We must find a btree opfamily that contains both operators, else the
+ * implication can't be determined. Also, the opfamily must contain a
+ * suitable test operator taking the operators' righthand datatypes.
+ *
+ * If there are multiple matching opfamilies, assume we can use any one to
+ * determine the logical relationship of the two operators and the correct
+ * corresponding test operator. This should work for any logically
+ * consistent opfamilies.
+ *
+ * Note that we can determine the operators' relationship for
+ * same-subexprs cases even from an opfamily that lacks a usable test
+ * operator. This can happen in cases with incomplete sets of cross-type
+ * comparison operators.
+ */
+ clause_op_infos = get_op_btree_interpretation(clause_op);
+ if (clause_op_infos)
+ pred_op_infos = get_op_btree_interpretation(pred_op);
+ else /* no point in looking */
+ pred_op_infos = NIL;
+
+ foreach(lcp, pred_op_infos)
+ {
+ OpBtreeInterpretation *pred_op_info = lfirst(lcp);
+ Oid opfamily_id = pred_op_info->opfamily_id;
+
+ foreach(lcc, clause_op_infos)
+ {
+ OpBtreeInterpretation *clause_op_info = lfirst(lcc);
+ StrategyNumber pred_strategy,
+ clause_strategy,
+ test_strategy;
+
+ /* Must find them in same opfamily */
+ if (opfamily_id != clause_op_info->opfamily_id)
+ continue;
+ /* Lefttypes should match */
+ Assert(clause_op_info->oplefttype == pred_op_info->oplefttype);
+
+ pred_strategy = pred_op_info->strategy;
+ clause_strategy = clause_op_info->strategy;
+
+ /*
+ * Check to see if we can make a proof for same-subexpressions
+ * cases based on the operators' relationship in this opfamily.
+ */
+ if (refute_it)
+ same_subexprs |= BT_refutes_table[clause_strategy - 1][pred_strategy - 1];
+ else
+ same_subexprs |= BT_implies_table[clause_strategy - 1][pred_strategy - 1];
+
+ /*
+ * Look up the "test" strategy number in the implication table
+ */
+ if (refute_it)
+ test_strategy = BT_refute_table[clause_strategy - 1][pred_strategy - 1];
+ else
+ test_strategy = BT_implic_table[clause_strategy - 1][pred_strategy - 1];
+
+ if (test_strategy == 0)
+ {
+ /* Can't determine implication using this interpretation */
+ continue;
+ }
+
+ /*
+ * See if opfamily has an operator for the test strategy and the
+ * datatypes.
+ */
+ if (test_strategy == BTNE)
+ {
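+ /* Btree opfamilies don't contain <>, so derive it as the negator of = */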
+ test_op = get_opfamily_member(opfamily_id,
+ pred_op_info->oprighttype,
+ clause_op_info->oprighttype,
+ BTEqualStrategyNumber);
+ if (OidIsValid(test_op))
+ test_op = get_negator(test_op);
+ }
+ else
+ {
+ test_op = get_opfamily_member(opfamily_id,
+ pred_op_info->oprighttype,
+ clause_op_info->oprighttype,
+ test_strategy);
+ }
+
+ if (!OidIsValid(test_op))
+ continue;
+
+ /*
+ * Last check: test_op must be immutable.
+ *
+ * Note that we require only the test_op to be immutable, not the
+ * original clause_op. (pred_op is assumed to have been checked
+ * immutable by the caller.) Essentially we are assuming that the
+ * opfamily is consistent even if it contains operators that are
+ * merely stable.
+ */
+ if (op_volatile(test_op) == PROVOLATILE_IMMUTABLE)
+ {
+ found = true;
+ break;
+ }
+ }
+
+ if (found)
+ break;
+ }
+
+ list_free_deep(pred_op_infos);
+ list_free_deep(clause_op_infos);
+
+ if (!found)
+ {
+ /* couldn't find a suitable comparison operator */
+ test_op = InvalidOid;
+ }
+
+ /*
+ * If we think we were able to prove something about same-subexpressions
+ * cases, check to make sure the clause_op is immutable before believing
+ * it completely. (Usually, the clause_op would be immutable if the
+ * pred_op is, but it's not entirely clear that this must be true in all
+ * cases, so let's check.)
+ */
+ if (same_subexprs &&
+ op_volatile(clause_op) != PROVOLATILE_IMMUTABLE)
+ same_subexprs = false;
+
+ /* Cache the results, whether positive or negative */
+ if (refute_it)
+ {
+ cache_entry->refute_test_op = test_op;
+ cache_entry->same_subexprs_refutes = same_subexprs;
+ cache_entry->have_refute = true;
+ }
+ else
+ {
+ cache_entry->implic_test_op = test_op;
+ cache_entry->same_subexprs_implies = same_subexprs;
+ cache_entry->have_implic = true;
+ }
+
+ return cache_entry;
+}
+
+/*
+ * operator_same_subexprs_lookup
+ * Convenience subroutine to look up the cached answer for
+ * same-subexpressions cases.
+ */
+static bool
+operator_same_subexprs_lookup(Oid pred_op, Oid clause_op, bool refute_it)
+{
+ OprProofCacheEntry *cache_entry;
+
+ cache_entry = lookup_proof_cache(pred_op, clause_op, refute_it);
+ if (refute_it)
+ return cache_entry->same_subexprs_refutes;
+ else
+ return cache_entry->same_subexprs_implies;
+}
+
+/*
+ * get_btree_test_op
+ * Identify the comparison operator needed for a btree-operator
+ * proof or refutation involving comparison of constants.
+ *
+ * Given the truth of a clause "var clause_op const1", we are attempting to
+ * prove or refute a predicate "var pred_op const2". The identities of the
+ * two operators are sufficient to determine the operator (if any) to compare
+ * const2 to const1 with.
+ *
+ * Returns the OID of the operator to use, or InvalidOid if no proof is
+ * possible.
+ */
+static Oid
+get_btree_test_op(Oid pred_op, Oid clause_op, bool refute_it)
+{
+ OprProofCacheEntry *cache_entry;
+
+ cache_entry = lookup_proof_cache(pred_op, clause_op, refute_it);
+ if (refute_it)
+ return cache_entry->refute_test_op;
+ else
+ return cache_entry->implic_test_op;
+}
+
+
+/*
+ * Callback for pg_amop inval events
+ */
+static void
+InvalidateOprProofCacheCallBack(Datum arg, int cacheid, uint32 hashvalue)
+{
+ HASH_SEQ_STATUS status;
+ OprProofCacheEntry *hentry;
+
+ Assert(OprProofCacheHash != NULL);
+
+ /* Currently we just reset all entries; hard to be smarter ... */
+ hash_seq_init(&status, OprProofCacheHash);
+
+ while ((hentry = (OprProofCacheEntry *) hash_seq_search(&status)) != NULL)
+ {
+ hentry->have_implic = false;
+ hentry->have_refute = false;
+ }
+}
diff --git a/src/backend/optimizer/util/relnode.c b/src/backend/optimizer/util/relnode.c
new file mode 100644
index 0000000..3c75fd5
--- /dev/null
+++ b/src/backend/optimizer/util/relnode.c
@@ -0,0 +1,2047 @@
+/*-------------------------------------------------------------------------
+ *
+ * relnode.c
+ * Relation-node lookup/construction routines
+ *
+ * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ * src/backend/optimizer/util/relnode.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include <limits.h>
+
+#include "miscadmin.h"
+#include "nodes/nodeFuncs.h"
+#include "optimizer/appendinfo.h"
+#include "optimizer/clauses.h"
+#include "optimizer/cost.h"
+#include "optimizer/inherit.h"
+#include "optimizer/pathnode.h"
+#include "optimizer/paths.h"
+#include "optimizer/placeholder.h"
+#include "optimizer/plancat.h"
+#include "optimizer/restrictinfo.h"
+#include "optimizer/tlist.h"
+#include "utils/hsearch.h"
+#include "utils/lsyscache.h"
+
+
+typedef struct JoinHashEntry
+{
+ Relids join_relids; /* hash key --- MUST BE FIRST */
+ RelOptInfo *join_rel;
+} JoinHashEntry;
+
+static void build_joinrel_tlist(PlannerInfo *root, RelOptInfo *joinrel,
+ RelOptInfo *input_rel);
+static List *build_joinrel_restrictlist(PlannerInfo *root,
+ RelOptInfo *joinrel,
+ RelOptInfo *outer_rel,
+ RelOptInfo *inner_rel);
+static void build_joinrel_joinlist(RelOptInfo *joinrel,
+ RelOptInfo *outer_rel,
+ RelOptInfo *inner_rel);
+static List *subbuild_joinrel_restrictlist(RelOptInfo *joinrel,
+ List *joininfo_list,
+ List *new_restrictlist);
+static List *subbuild_joinrel_joinlist(RelOptInfo *joinrel,
+ List *joininfo_list,
+ List *new_joininfo);
+static void set_foreign_rel_properties(RelOptInfo *joinrel,
+ RelOptInfo *outer_rel, RelOptInfo *inner_rel);
+static void add_join_rel(PlannerInfo *root, RelOptInfo *joinrel);
+static void build_joinrel_partition_info(RelOptInfo *joinrel,
+ RelOptInfo *outer_rel, RelOptInfo *inner_rel,
+ List *restrictlist, JoinType jointype);
+static bool have_partkey_equi_join(RelOptInfo *joinrel,
+ RelOptInfo *rel1, RelOptInfo *rel2,
+ JoinType jointype, List *restrictlist);
+static int match_expr_to_partition_keys(Expr *expr, RelOptInfo *rel,
+ bool strict_op);
+static void set_joinrel_partition_key_exprs(RelOptInfo *joinrel,
+ RelOptInfo *outer_rel, RelOptInfo *inner_rel,
+ JoinType jointype);
+static void build_child_join_reltarget(PlannerInfo *root,
+ RelOptInfo *parentrel,
+ RelOptInfo *childrel,
+ int nappinfos,
+ AppendRelInfo **appinfos);
+
+
+/*
+ * setup_simple_rel_arrays
+ * Prepare the arrays we use for quickly accessing base relations
+ * and AppendRelInfos.
+ */
+void
+setup_simple_rel_arrays(PlannerInfo *root)
+{
+ int size;
+ Index rti;
+ ListCell *lc;
+
+ /* Arrays are accessed using RT indexes (1..N) */
+ size = list_length(root->parse->rtable) + 1;
+ root->simple_rel_array_size = size;
+
+ /*
+ * simple_rel_array is initialized to all NULLs, since no RelOptInfos
+ * exist yet. It'll be filled by later calls to build_simple_rel().
+ */
+ root->simple_rel_array = (RelOptInfo **)
+ palloc0(size * sizeof(RelOptInfo *));
+
+ /* simple_rte_array is an array equivalent of the rtable list */
+ root->simple_rte_array = (RangeTblEntry **)
+ palloc0(size * sizeof(RangeTblEntry *));
+ rti = 1;
+ foreach(lc, root->parse->rtable)
+ {
+ RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc);
+
+ root->simple_rte_array[rti++] = rte;
+ }
+
+ /* append_rel_array is not needed if there are no AppendRelInfos */
+ if (root->append_rel_list == NIL)
+ {
+ root->append_rel_array = NULL;
+ return;
+ }
+
+ root->append_rel_array = (AppendRelInfo **)
+ palloc0(size * sizeof(AppendRelInfo *));
+
+ /*
+ * append_rel_array is filled with any already-existing AppendRelInfos,
+ * which currently could only come from UNION ALL flattening. We might
+ * add more later during inheritance expansion, but it's the
+ * responsibility of the expansion code to update the array properly.
+ */
+ foreach(lc, root->append_rel_list)
+ {
+ AppendRelInfo *appinfo = lfirst_node(AppendRelInfo, lc);
+ int child_relid = appinfo->child_relid;
+
+ /* Sanity check */
+ Assert(child_relid < size);
+
+ if (root->append_rel_array[child_relid])
+ elog(ERROR, "child relation already exists");
+
+ root->append_rel_array[child_relid] = appinfo;
+ }
+}
+
+/*
+ * expand_planner_arrays
+ * Expand the PlannerInfo's per-RTE arrays by add_size members
+ * and initialize the newly added entries to NULLs
+ *
+ * Note: this causes the append_rel_array to become allocated even if
+ * it was not before. This is okay for current uses, because we only call
+ * this when adding child relations, which always have AppendRelInfos.
+ */
+void
+expand_planner_arrays(PlannerInfo *root, int add_size)
+{
+ int new_size;
+
+ Assert(add_size > 0);
+
+ new_size = root->simple_rel_array_size + add_size;
+
+ root->simple_rel_array = (RelOptInfo **)
+ repalloc(root->simple_rel_array,
+ sizeof(RelOptInfo *) * new_size);
+ MemSet(root->simple_rel_array + root->simple_rel_array_size,
+ 0, sizeof(RelOptInfo *) * add_size);
+
+ root->simple_rte_array = (RangeTblEntry **)
+ repalloc(root->simple_rte_array,
+ sizeof(RangeTblEntry *) * new_size);
+ MemSet(root->simple_rte_array + root->simple_rel_array_size,
+ 0, sizeof(RangeTblEntry *) * add_size);
+
+ if (root->append_rel_array)
+ {
+ root->append_rel_array = (AppendRelInfo **)
+ repalloc(root->append_rel_array,
+ sizeof(AppendRelInfo *) * new_size);
+ MemSet(root->append_rel_array + root->simple_rel_array_size,
+ 0, sizeof(AppendRelInfo *) * add_size);
+ }
+ else
+ {
+ root->append_rel_array = (AppendRelInfo **)
+ palloc0(sizeof(AppendRelInfo *) * new_size);
+ }
+
+ root->simple_rel_array_size = new_size;
+}
+
+/*
+ * build_simple_rel
+ * Construct a new RelOptInfo for a base relation or 'other' relation.
+ */
+RelOptInfo *
+build_simple_rel(PlannerInfo *root, int relid, RelOptInfo *parent)
+{
+ RelOptInfo *rel;
+ RangeTblEntry *rte;
+
+ /* Rel should not exist already */
+ Assert(relid > 0 && relid < root->simple_rel_array_size);
+ if (root->simple_rel_array[relid] != NULL)
+ elog(ERROR, "rel %d already exists", relid);
+
+ /* Fetch RTE for relation */
+ rte = root->simple_rte_array[relid];
+ Assert(rte != NULL);
+
+ rel = makeNode(RelOptInfo);
+ rel->reloptkind = parent ? RELOPT_OTHER_MEMBER_REL : RELOPT_BASEREL;
+ rel->relids = bms_make_singleton(relid);
+ rel->rows = 0;
+ /* cheap startup cost is interesting iff not all tuples to be retrieved */
+ rel->consider_startup = (root->tuple_fraction > 0);
+ rel->consider_param_startup = false; /* might get changed later */
+ rel->consider_parallel = false; /* might get changed later */
+ rel->reltarget = create_empty_pathtarget();
+ rel->pathlist = NIL;
+ rel->ppilist = NIL;
+ rel->partial_pathlist = NIL;
+ rel->cheapest_startup_path = NULL;
+ rel->cheapest_total_path = NULL;
+ rel->cheapest_unique_path = NULL;
+ rel->cheapest_parameterized_paths = NIL;
+ rel->relid = relid;
+ rel->rtekind = rte->rtekind;
+ /* min_attr, max_attr, attr_needed, attr_widths are set below */
+ rel->lateral_vars = NIL;
+ rel->indexlist = NIL;
+ rel->statlist = NIL;
+ rel->pages = 0;
+ rel->tuples = 0;
+ rel->allvisfrac = 0;
+ rel->eclass_indexes = NULL;
+ rel->subroot = NULL;
+ rel->subplan_params = NIL;
+ rel->rel_parallel_workers = -1; /* set up in get_relation_info */
+ rel->amflags = 0;
+ rel->serverid = InvalidOid;
+ rel->userid = rte->checkAsUser;
+ rel->useridiscurrent = false;
+ rel->fdwroutine = NULL;
+ rel->fdw_private = NULL;
+ rel->unique_for_rels = NIL;
+ rel->non_unique_for_rels = NIL;
+ rel->baserestrictinfo = NIL;
+ rel->baserestrictcost.startup = 0;
+ rel->baserestrictcost.per_tuple = 0;
+ rel->baserestrict_min_security = UINT_MAX;
+ rel->joininfo = NIL;
+ rel->has_eclass_joins = false;
+ rel->consider_partitionwise_join = false; /* might get changed later */
+ rel->part_scheme = NULL;
+ rel->nparts = -1;
+ rel->boundinfo = NULL;
+ rel->partbounds_merged = false;
+ rel->partition_qual = NIL;
+ rel->part_rels = NULL;
+ rel->live_parts = NULL;
+ rel->all_partrels = NULL;
+ rel->partexprs = NULL;
+ rel->nullable_partexprs = NULL;
+
+ /*
+ * Pass assorted information down the inheritance hierarchy.
+ */
+ if (parent)
+ {
+ /*
+ * Each direct or indirect child wants to know the relids of its
+ * topmost parent.
+ */
+ if (parent->top_parent_relids)
+ rel->top_parent_relids = parent->top_parent_relids;
+ else
+ rel->top_parent_relids = bms_copy(parent->relids);
+
+ /*
+ * Also propagate lateral-reference information from appendrel parent
+ * rels to their child rels. We intentionally give each child rel the
+ * same minimum parameterization, even though it's quite possible that
+ * some don't reference all the lateral rels. This is because any
+ * append path for the parent will have to have the same
+ * parameterization for every child anyway, and there's no value in
+ * forcing extra reparameterize_path() calls. Similarly, a lateral
+ * reference to the parent prevents use of otherwise-movable join rels
+ * for each child.
+ *
+ * It's possible for child rels to have their own children, in which
+ * case the topmost parent's lateral info propagates all the way down.
+ */
+ rel->direct_lateral_relids = parent->direct_lateral_relids;
+ rel->lateral_relids = parent->lateral_relids;
+ rel->lateral_referencers = parent->lateral_referencers;
+ }
+ else
+ {
+ rel->top_parent_relids = NULL;
+ rel->direct_lateral_relids = NULL;
+ rel->lateral_relids = NULL;
+ rel->lateral_referencers = NULL;
+ }
+
+ /* Check type of rtable entry */
+ switch (rte->rtekind)
+ {
+ case RTE_RELATION:
+ /* Table --- retrieve statistics from the system catalogs */
+ get_relation_info(root, rte->relid, rte->inh, rel);
+ break;
+ case RTE_SUBQUERY:
+ case RTE_FUNCTION:
+ case RTE_TABLEFUNC:
+ case RTE_VALUES:
+ case RTE_CTE:
+ case RTE_NAMEDTUPLESTORE:
+
+ /*
+ * Subquery, function, tablefunc, values list, CTE, or ENR --- set
+ * up attr range and arrays
+ *
+ * Note: 0 is included in range to support whole-row Vars
+ */
+ rel->min_attr = 0;
+ rel->max_attr = list_length(rte->eref->colnames);
+ rel->attr_needed = (Relids *)
+ palloc0((rel->max_attr - rel->min_attr + 1) * sizeof(Relids));
+ rel->attr_widths = (int32 *)
+ palloc0((rel->max_attr - rel->min_attr + 1) * sizeof(int32));
+ break;
+ case RTE_RESULT:
+ /* RTE_RESULT has no columns, nor could it have whole-row Var */
+ rel->min_attr = 0;
+ rel->max_attr = -1;
+ rel->attr_needed = NULL;
+ rel->attr_widths = NULL;
+ break;
+ default:
+ elog(ERROR, "unrecognized RTE kind: %d",
+ (int) rte->rtekind);
+ break;
+ }
+
+ /*
+ * Copy the parent's quals to the child, with appropriate substitution of
+ * variables. If any constant false or NULL clauses turn up, we can mark
+ * the child as dummy right away. (We must do this immediately so that
+ * pruning works correctly when recursing in expand_partitioned_rtentry.)
+ */
+ if (parent)
+ {
+ AppendRelInfo *appinfo = root->append_rel_array[relid];
+
+ Assert(appinfo != NULL);
+ if (!apply_child_basequals(root, parent, rel, rte, appinfo))
+ {
+ /*
+ * Some restriction clause reduced to constant FALSE or NULL after
+ * substitution, so this child need not be scanned.
+ */
+ mark_dummy_rel(rel);
+ }
+ }
+
+ /* Save the finished struct in the query's simple_rel_array */
+ root->simple_rel_array[relid] = rel;
+
+ return rel;
+}
+
+/*
+ * find_base_rel
+ * Find a base or other relation entry, which must already exist.
+ */
+RelOptInfo *
+find_base_rel(PlannerInfo *root, int relid)
+{
+ RelOptInfo *rel;
+
+ Assert(relid > 0);
+
+ if (relid < root->simple_rel_array_size)
+ {
+ rel = root->simple_rel_array[relid];
+ if (rel)
+ return rel;
+ }
+
+ elog(ERROR, "no relation entry for relid %d", relid);
+
+ return NULL; /* keep compiler quiet */
+}
+
+/*
+ * build_join_rel_hash
+ * Construct the auxiliary hash table for join relations.
+ */
+static void
+build_join_rel_hash(PlannerInfo *root)
+{
+ HTAB *hashtab;
+ HASHCTL hash_ctl;
+ ListCell *l;
+
+ /* Create the hash table */
+ hash_ctl.keysize = sizeof(Relids);
+ hash_ctl.entrysize = sizeof(JoinHashEntry);
+ hash_ctl.hash = bitmap_hash;
+ hash_ctl.match = bitmap_match;
+ hash_ctl.hcxt = CurrentMemoryContext;
+ hashtab = hash_create("JoinRelHashTable",
+ 256L,
+ &hash_ctl,
+ HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT);
+
+ /* Insert all the already-existing joinrels */
+ foreach(l, root->join_rel_list)
+ {
+ RelOptInfo *rel = (RelOptInfo *) lfirst(l);
+ JoinHashEntry *hentry;
+ bool found;
+
+ hentry = (JoinHashEntry *) hash_search(hashtab,
+ &(rel->relids),
+ HASH_ENTER,
+ &found);
+ Assert(!found);
+ hentry->join_rel = rel;
+ }
+
+ root->join_rel_hash = hashtab;
+}
+
+/*
+ * find_join_rel
+ * Returns relation entry corresponding to 'relids' (a set of RT indexes),
+ * or NULL if none exists. This is for join relations.
+ */
+RelOptInfo *
+find_join_rel(PlannerInfo *root, Relids relids)
+{
+ /*
+ * Switch to using hash lookup when list grows "too long". The threshold
+ * is arbitrary and is known only here.
+ */
+ if (!root->join_rel_hash && list_length(root->join_rel_list) > 32)
+ build_join_rel_hash(root);
+
+ /*
+ * Use either hashtable lookup or linear search, as appropriate.
+ *
+ * Note: the seemingly redundant hashkey variable is used to avoid taking
+ * the address of relids; unless the compiler is exceedingly smart, doing
+ * so would force relids out of a register and thus probably slow down the
+ * list-search case.
+ */
+ if (root->join_rel_hash)
+ {
+ Relids hashkey = relids;
+ JoinHashEntry *hentry;
+
+ hentry = (JoinHashEntry *) hash_search(root->join_rel_hash,
+ &hashkey,
+ HASH_FIND,
+ NULL);
+ if (hentry)
+ return hentry->join_rel;
+ }
+ else
+ {
+ ListCell *l;
+
+ foreach(l, root->join_rel_list)
+ {
+ RelOptInfo *rel = (RelOptInfo *) lfirst(l);
+
+ if (bms_equal(rel->relids, relids))
+ return rel;
+ }
+ }
+
+ return NULL;
+}
+
+/*
+ * set_foreign_rel_properties
+ * Set up foreign-join fields if the outer and inner relations are foreign
+ * tables (or joins) belonging to the same server and assigned to the same
+ * user to check access permissions as.
+ *
+ * In addition to an exact match of userid, we allow the case where one side
+ * has a zero userid (implying the current user) and the other side has an
+ * explicit userid that happens to equal the current user; but in that case,
+ * pushdown of the join is only valid for the current user. The
+ * useridiscurrent field records whether we had to make such an assumption
+ * for this join or any sub-join.
+ *
+ * Otherwise these fields are left invalid, so GetForeignJoinPaths will not be
+ * called for the join relation.
+ *
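+ * For example, a join of two foreign tables that belong to the same foreign
+ * server and are checked as the same user can then be offered to the FDW
+ * (e.g. postgres_fdw) for possible remote execution.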
+ */
+static void
+set_foreign_rel_properties(RelOptInfo *joinrel, RelOptInfo *outer_rel,
+ RelOptInfo *inner_rel)
+{
+ if (OidIsValid(outer_rel->serverid) &&
+ inner_rel->serverid == outer_rel->serverid)
+ {
+ if (inner_rel->userid == outer_rel->userid)
+ {
+ joinrel->serverid = outer_rel->serverid;
+ joinrel->userid = outer_rel->userid;
+ joinrel->useridiscurrent = outer_rel->useridiscurrent || inner_rel->useridiscurrent;
+ joinrel->fdwroutine = outer_rel->fdwroutine;
+ }
+ else if (!OidIsValid(inner_rel->userid) &&
+ outer_rel->userid == GetUserId())
+ {
+ joinrel->serverid = outer_rel->serverid;
+ joinrel->userid = outer_rel->userid;
+ joinrel->useridiscurrent = true;
+ joinrel->fdwroutine = outer_rel->fdwroutine;
+ }
+ else if (!OidIsValid(outer_rel->userid) &&
+ inner_rel->userid == GetUserId())
+ {
+ joinrel->serverid = outer_rel->serverid;
+ joinrel->userid = inner_rel->userid;
+ joinrel->useridiscurrent = true;
+ joinrel->fdwroutine = outer_rel->fdwroutine;
+ }
+ }
+}
+
+/*
+ * add_join_rel
+ * Add given join relation to the list of join relations in the given
+ * PlannerInfo. Also add it to the auxiliary hashtable if there is one.
+ */
+static void
+add_join_rel(PlannerInfo *root, RelOptInfo *joinrel)
+{
+ /* GEQO requires us to append the new joinrel to the end of the list! */
+ root->join_rel_list = lappend(root->join_rel_list, joinrel);
+
+ /* store it into the auxiliary hashtable if there is one. */
+ if (root->join_rel_hash)
+ {
+ JoinHashEntry *hentry;
+ bool found;
+
+ hentry = (JoinHashEntry *) hash_search(root->join_rel_hash,
+ &(joinrel->relids),
+ HASH_ENTER,
+ &found);
+ Assert(!found);
+ hentry->join_rel = joinrel;
+ }
+}
+
+/*
+ * build_join_rel
+ * Returns relation entry corresponding to the union of two given rels,
+ * creating a new relation entry if none already exists.
+ *
+ * 'joinrelids' is the Relids set that uniquely identifies the join
+ * 'outer_rel' and 'inner_rel' are relation nodes for the relations to be
+ * joined
+ * 'sjinfo': join context info
+ * 'restrictlist_ptr': result variable. If not NULL, *restrictlist_ptr
+ * receives the list of RestrictInfo nodes that apply to this
+ * particular pair of joinable relations.
+ *
+ * restrictlist_ptr makes the routine's API a little grotty, but it saves
+ * duplicated calculation of the restrictlist...
+ */
+RelOptInfo *
+build_join_rel(PlannerInfo *root,
+ Relids joinrelids,
+ RelOptInfo *outer_rel,
+ RelOptInfo *inner_rel,
+ SpecialJoinInfo *sjinfo,
+ List **restrictlist_ptr)
+{
+ RelOptInfo *joinrel;
+ List *restrictlist;
+
+ /* This function should be used only for joins between parent relations. */
+ Assert(!IS_OTHER_REL(outer_rel) && !IS_OTHER_REL(inner_rel));
+
+ /*
+ * See if we already have a joinrel for this set of base rels.
+ */
+ joinrel = find_join_rel(root, joinrelids);
+
+ if (joinrel)
+ {
+ /*
+ * Yes, so we only need to figure the restrictlist for this particular
+ * pair of component relations.
+ */
+ if (restrictlist_ptr)
+ *restrictlist_ptr = build_joinrel_restrictlist(root,
+ joinrel,
+ outer_rel,
+ inner_rel);
+ return joinrel;
+ }
+
+ /*
+ * Nope, so make one.
+ */
+ joinrel = makeNode(RelOptInfo);
+ joinrel->reloptkind = RELOPT_JOINREL;
+ joinrel->relids = bms_copy(joinrelids);
+ joinrel->rows = 0;
+ /* cheap startup cost is interesting iff not all tuples to be retrieved */
+ joinrel->consider_startup = (root->tuple_fraction > 0);
+ joinrel->consider_param_startup = false;
+ joinrel->consider_parallel = false;
+ joinrel->reltarget = create_empty_pathtarget();
+ joinrel->pathlist = NIL;
+ joinrel->ppilist = NIL;
+ joinrel->partial_pathlist = NIL;
+ joinrel->cheapest_startup_path = NULL;
+ joinrel->cheapest_total_path = NULL;
+ joinrel->cheapest_unique_path = NULL;
+ joinrel->cheapest_parameterized_paths = NIL;
+ /* init direct_lateral_relids from children; we'll finish it up below */
+ joinrel->direct_lateral_relids =
+ bms_union(outer_rel->direct_lateral_relids,
+ inner_rel->direct_lateral_relids);
+ joinrel->lateral_relids = min_join_parameterization(root, joinrel->relids,
+ outer_rel, inner_rel);
+ joinrel->relid = 0; /* indicates not a baserel */
+ joinrel->rtekind = RTE_JOIN;
+ joinrel->min_attr = 0;
+ joinrel->max_attr = 0;
+ joinrel->attr_needed = NULL;
+ joinrel->attr_widths = NULL;
+ joinrel->lateral_vars = NIL;
+ joinrel->lateral_referencers = NULL;
+ joinrel->indexlist = NIL;
+ joinrel->statlist = NIL;
+ joinrel->pages = 0;
+ joinrel->tuples = 0;
+ joinrel->allvisfrac = 0;
+ joinrel->eclass_indexes = NULL;
+ joinrel->subroot = NULL;
+ joinrel->subplan_params = NIL;
+ joinrel->rel_parallel_workers = -1;
+ joinrel->amflags = 0;
+ joinrel->serverid = InvalidOid;
+ joinrel->userid = InvalidOid;
+ joinrel->useridiscurrent = false;
+ joinrel->fdwroutine = NULL;
+ joinrel->fdw_private = NULL;
+ joinrel->unique_for_rels = NIL;
+ joinrel->non_unique_for_rels = NIL;
+ joinrel->baserestrictinfo = NIL;
+ joinrel->baserestrictcost.startup = 0;
+ joinrel->baserestrictcost.per_tuple = 0;
+ joinrel->baserestrict_min_security = UINT_MAX;
+ joinrel->joininfo = NIL;
+ joinrel->has_eclass_joins = false;
+ joinrel->consider_partitionwise_join = false; /* might get changed later */
+ joinrel->top_parent_relids = NULL;
+ joinrel->part_scheme = NULL;
+ joinrel->nparts = -1;
+ joinrel->boundinfo = NULL;
+ joinrel->partbounds_merged = false;
+ joinrel->partition_qual = NIL;
+ joinrel->part_rels = NULL;
+ joinrel->live_parts = NULL;
+ joinrel->all_partrels = NULL;
+ joinrel->partexprs = NULL;
+ joinrel->nullable_partexprs = NULL;
+
+ /* Compute information relevant to the foreign relations. */
+ set_foreign_rel_properties(joinrel, outer_rel, inner_rel);
+
+ /*
+ * Create a new tlist containing just the vars that need to be output from
+ * this join (ie, are needed for higher joinclauses or final output).
+ *
+ * NOTE: the tlist order for a join rel will depend on which pair of outer
+ * and inner rels we first try to build it from. But the contents should
+ * be the same regardless.
+ */
+ build_joinrel_tlist(root, joinrel, outer_rel);
+ build_joinrel_tlist(root, joinrel, inner_rel);
+ add_placeholders_to_joinrel(root, joinrel, outer_rel, inner_rel);
+
+ /*
+ * add_placeholders_to_joinrel also took care of adding the ph_lateral
+ * sets of any PlaceHolderVars computed here to direct_lateral_relids, so
+ * now we can finish computing that. This is much like the computation of
+ * the transitively-closed lateral_relids in min_join_parameterization,
+ * except that here we *do* have to consider the added PHVs.
+ */
+ joinrel->direct_lateral_relids =
+ bms_del_members(joinrel->direct_lateral_relids, joinrel->relids);
+ if (bms_is_empty(joinrel->direct_lateral_relids))
+ joinrel->direct_lateral_relids = NULL;
+
+ /*
+ * Construct restrict and join clause lists for the new joinrel. (The
+ * caller might or might not need the restrictlist, but I need it anyway
+ * for set_joinrel_size_estimates().)
+ */
+ restrictlist = build_joinrel_restrictlist(root, joinrel,
+ outer_rel, inner_rel);
+ if (restrictlist_ptr)
+ *restrictlist_ptr = restrictlist;
+ build_joinrel_joinlist(joinrel, outer_rel, inner_rel);
+
+ /*
+ * This is also the right place to check whether the joinrel has any
+ * pending EquivalenceClass joins.
+ */
+ joinrel->has_eclass_joins = has_relevant_eclass_joinclause(root, joinrel);
+
+ /* Store the partition information. */
+ build_joinrel_partition_info(joinrel, outer_rel, inner_rel, restrictlist,
+ sjinfo->jointype);
+
+ /*
+ * Set estimates of the joinrel's size.
+ */
+ set_joinrel_size_estimates(root, joinrel, outer_rel, inner_rel,
+ sjinfo, restrictlist);
+
+ /*
+ * Set the consider_parallel flag if this joinrel could potentially be
+ * scanned within a parallel worker. If this flag is false for either
+ * inner_rel or outer_rel, then it must be false for the joinrel also.
+ * Even if both are true, there might be parallel-restricted expressions
+ * in the targetlist or quals.
+ *
+ * Note that if there are more than two rels in this relation, they could
+ * be divided between inner_rel and outer_rel in any arbitrary way. We
+ * assume this doesn't matter, because we should hit all the same baserels
+ * and joinclauses while building up to this joinrel no matter which we
+ * take; therefore, we should make the same decision here however we get
+ * here.
+ */
+ if (inner_rel->consider_parallel && outer_rel->consider_parallel &&
+ is_parallel_safe(root, (Node *) restrictlist) &&
+ is_parallel_safe(root, (Node *) joinrel->reltarget->exprs))
+ joinrel->consider_parallel = true;
+
+ /* Add the joinrel to the PlannerInfo. */
+ add_join_rel(root, joinrel);
+
+ /*
+ * Also, if dynamic-programming join search is active, add the new joinrel
+ * to the appropriate sublist. Note: you might think the Assert on number
+ * of members should be for equality, but some of the level 1 rels might
+ * have been joinrels already, so we can only assert <=.
+ */
+ if (root->join_rel_level)
+ {
+ Assert(root->join_cur_level > 0);
+ Assert(root->join_cur_level <= bms_num_members(joinrel->relids));
+ root->join_rel_level[root->join_cur_level] =
+ lappend(root->join_rel_level[root->join_cur_level], joinrel);
+ }
+
+ return joinrel;
+}
+
+/*
+ * build_child_join_rel
+ * Builds RelOptInfo representing join between given two child relations.
+ *
+ * 'outer_rel' and 'inner_rel' are the RelOptInfos of child relations being
+ * joined
+ * 'parent_joinrel' is the RelOptInfo representing the join between parent
+ * relations. Some of the members of the new RelOptInfo are produced by
+ * translating the corresponding members of this RelOptInfo
+ * 'sjinfo': child-join context info
+ * 'restrictlist': list of RestrictInfo nodes that apply to this particular
+ * pair of joinable relations
+ * 'jointype' is the join type (inner, left, full, etc)
+ */
+RelOptInfo *
+build_child_join_rel(PlannerInfo *root, RelOptInfo *outer_rel,
+ RelOptInfo *inner_rel, RelOptInfo *parent_joinrel,
+ List *restrictlist, SpecialJoinInfo *sjinfo,
+ JoinType jointype)
+{
+ RelOptInfo *joinrel = makeNode(RelOptInfo);
+ AppendRelInfo **appinfos;
+ int nappinfos;
+
+ /* Only joins between "other" relations land here. */
+ Assert(IS_OTHER_REL(outer_rel) && IS_OTHER_REL(inner_rel));
+
+ /* The parent joinrel should have consider_partitionwise_join set. */
+ Assert(parent_joinrel->consider_partitionwise_join);
+
+ joinrel->reloptkind = RELOPT_OTHER_JOINREL;
+ joinrel->relids = bms_union(outer_rel->relids, inner_rel->relids);
+ joinrel->rows = 0;
+ /* cheap startup cost is interesting iff not all tuples to be retrieved */
+ joinrel->consider_startup = (root->tuple_fraction > 0);
+ joinrel->consider_param_startup = false;
+ joinrel->consider_parallel = false;
+ joinrel->reltarget = create_empty_pathtarget();
+ joinrel->pathlist = NIL;
+ joinrel->ppilist = NIL;
+ joinrel->partial_pathlist = NIL;
+ joinrel->cheapest_startup_path = NULL;
+ joinrel->cheapest_total_path = NULL;
+ joinrel->cheapest_unique_path = NULL;
+ joinrel->cheapest_parameterized_paths = NIL;
+ joinrel->direct_lateral_relids = NULL;
+ joinrel->lateral_relids = NULL;
+ joinrel->relid = 0; /* indicates not a baserel */
+ joinrel->rtekind = RTE_JOIN;
+ joinrel->min_attr = 0;
+ joinrel->max_attr = 0;
+ joinrel->attr_needed = NULL;
+ joinrel->attr_widths = NULL;
+ joinrel->lateral_vars = NIL;
+ joinrel->lateral_referencers = NULL;
+ joinrel->indexlist = NIL;
+ joinrel->pages = 0;
+ joinrel->tuples = 0;
+ joinrel->allvisfrac = 0;
+ joinrel->eclass_indexes = NULL;
+ joinrel->subroot = NULL;
+ joinrel->subplan_params = NIL;
+ joinrel->amflags = 0;
+ joinrel->serverid = InvalidOid;
+ joinrel->userid = InvalidOid;
+ joinrel->useridiscurrent = false;
+ joinrel->fdwroutine = NULL;
+ joinrel->fdw_private = NULL;
+ joinrel->baserestrictinfo = NIL;
+ joinrel->baserestrictcost.startup = 0;
+ joinrel->baserestrictcost.per_tuple = 0;
+ joinrel->joininfo = NIL;
+ joinrel->has_eclass_joins = false;
+ joinrel->consider_partitionwise_join = false; /* might get changed later */
+ joinrel->top_parent_relids = NULL;
+ joinrel->part_scheme = NULL;
+ joinrel->nparts = -1;
+ joinrel->boundinfo = NULL;
+ joinrel->partbounds_merged = false;
+ joinrel->partition_qual = NIL;
+ joinrel->part_rels = NULL;
+ joinrel->live_parts = NULL;
+ joinrel->all_partrels = NULL;
+ joinrel->partexprs = NULL;
+ joinrel->nullable_partexprs = NULL;
+
+ joinrel->top_parent_relids = bms_union(outer_rel->top_parent_relids,
+ inner_rel->top_parent_relids);
+
+ /* Compute information relevant to foreign relations. */
+ set_foreign_rel_properties(joinrel, outer_rel, inner_rel);
+
+ /* Compute information needed for mapping Vars to the child rel */
+ appinfos = find_appinfos_by_relids(root, joinrel->relids, &nappinfos);
+
+ /* Set up reltarget struct */
+ build_child_join_reltarget(root, parent_joinrel, joinrel,
+ nappinfos, appinfos);
+
+ /* Construct joininfo list. */
+ joinrel->joininfo = (List *) adjust_appendrel_attrs(root,
+ (Node *) parent_joinrel->joininfo,
+ nappinfos,
+ appinfos);
+
+ /*
+ * The lateral relids referenced by the child join are the same as those
+ * referenced by the parent joinrel.
+ */
+ joinrel->direct_lateral_relids = (Relids) bms_copy(parent_joinrel->direct_lateral_relids);
+ joinrel->lateral_relids = (Relids) bms_copy(parent_joinrel->lateral_relids);
+
+ /*
+ * If the parent joinrel has pending equivalence classes, so does the
+ * child.
+ */
+ joinrel->has_eclass_joins = parent_joinrel->has_eclass_joins;
+
+ /* Is the join between partitions itself partitioned? */
+ build_joinrel_partition_info(joinrel, outer_rel, inner_rel, restrictlist,
+ jointype);
+
+ /* Child joinrel is parallel safe if parent is parallel safe. */
+ joinrel->consider_parallel = parent_joinrel->consider_parallel;
+
+ /* Set estimates of the child-joinrel's size. */
+ set_joinrel_size_estimates(root, joinrel, outer_rel, inner_rel,
+ sjinfo, restrictlist);
+
+ /* We build the join only once. */
+ Assert(!find_join_rel(root, joinrel->relids));
+
+ /* Add the relation to the PlannerInfo. */
+ add_join_rel(root, joinrel);
+
+ /*
+ * We might need EquivalenceClass members corresponding to the child join,
+ * so that we can represent sort pathkeys for it. As with children of
+ * baserels, we shouldn't need this unless there are relevant eclass joins
+ * (implying that a merge join might be possible) or pathkeys to sort by.
+ */
+ if (joinrel->has_eclass_joins || has_useful_pathkeys(root, parent_joinrel))
+ add_child_join_rel_equivalences(root,
+ nappinfos, appinfos,
+ parent_joinrel, joinrel);
+
+ pfree(appinfos);
+
+ return joinrel;
+}
+
+/*
+ * min_join_parameterization
+ *
+ * Determine the minimum possible parameterization of a joinrel, that is, the
+ * set of other rels it contains LATERAL references to. We save this value in
+ * the join's RelOptInfo. This function is split out of build_join_rel()
+ * because join_is_legal() needs the value to check a prospective join.
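+ *
+ * For example, if one input rel has a LATERAL reference to some rel C that
+ * is not part of the join, C remains in the joinrel's minimum
+ * parameterization, while lateral references to rels inside the join drop
+ * out.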
+ */
+Relids
+min_join_parameterization(PlannerInfo *root,
+ Relids joinrelids,
+ RelOptInfo *outer_rel,
+ RelOptInfo *inner_rel)
+{
+ Relids result;
+
+ /*
+ * Basically we just need the union of the inputs' lateral_relids, less
+ * whatever is already in the join.
+ *
+ * It's not immediately obvious that this is a valid way to compute the
+ * result, because it might seem that we're ignoring possible lateral refs
+ * of PlaceHolderVars that are due to be computed at the join but not in
+ * either input. However, because create_lateral_join_info() already
+ * charged all such PHV refs to each member baserel of the join, they'll
+ * be accounted for already in the inputs' lateral_relids. Likewise, we
+ * do not need to worry about doing transitive closure here, because that
+ * was already accounted for in the original baserel lateral_relids.
+ */
+ result = bms_union(outer_rel->lateral_relids, inner_rel->lateral_relids);
+ result = bms_del_members(result, joinrelids);
+
+ /* Maintain invariant that result is exactly NULL if empty */
+ if (bms_is_empty(result))
+ result = NULL;
+
+ return result;
+}
+
+/*
+ * build_joinrel_tlist
+ * Builds a join relation's target list from an input relation.
+ * (This is invoked twice to handle the two input relations.)
+ *
+ * The join's targetlist includes all Vars of its member relations that
+ * will still be needed above the join. This subroutine adds all such
+ * Vars from the specified input rel's tlist to the join rel's tlist.
+ *
+ * We also compute the expected width of the join's output, making use
+ * of data that was cached at the baserel level by set_rel_width().
+ */
+static void
+build_joinrel_tlist(PlannerInfo *root, RelOptInfo *joinrel,
+ RelOptInfo *input_rel)
+{
+ Relids relids = joinrel->relids;
+ ListCell *vars;
+
+ foreach(vars, input_rel->reltarget->exprs)
+ {
+ Var *var = (Var *) lfirst(vars);
+
+ /*
+ * Ignore PlaceHolderVars in the input tlists; we'll make our own
+ * decisions about whether to copy them.
+ */
+ if (IsA(var, PlaceHolderVar))
+ continue;
+
+ /*
+ * Otherwise, anything in a baserel or joinrel targetlist ought to be
+ * a Var. (More general cases can only appear in appendrel child
+ * rels, which will never be seen here.)
+ */
+ if (!IsA(var, Var))
+ elog(ERROR, "unexpected node type in rel targetlist: %d",
+ (int) nodeTag(var));
+
+ if (var->varno == ROWID_VAR)
+ {
+ /* UPDATE/DELETE/MERGE row identity vars are always needed */
+ RowIdentityVarInfo *ridinfo = (RowIdentityVarInfo *)
+ list_nth(root->row_identity_vars, var->varattno - 1);
+
+ joinrel->reltarget->exprs = lappend(joinrel->reltarget->exprs,
+ var);
+ /* Vars have cost zero, so no need to adjust reltarget->cost */
+ joinrel->reltarget->width += ridinfo->rowidwidth;
+ }
+ else
+ {
+ RelOptInfo *baserel;
+ int ndx;
+
+ /* Get the Var's original base rel */
+ baserel = find_base_rel(root, var->varno);
+
+ /* Is it still needed above this joinrel? */
+ ndx = var->varattno - baserel->min_attr;
+ if (bms_nonempty_difference(baserel->attr_needed[ndx], relids))
+ {
+ /* Yup, add it to the output */
+ joinrel->reltarget->exprs = lappend(joinrel->reltarget->exprs,
+ var);
+ /* Vars have cost zero, so no need to adjust reltarget->cost */
+ joinrel->reltarget->width += baserel->attr_widths[ndx];
+ }
+ }
+ }
+}
+
+/*
+ * build_joinrel_restrictlist
+ * build_joinrel_joinlist
+ * These routines build lists of restriction and join clauses for a
+ * join relation from the joininfo lists of the relations it joins.
+ *
+ * These routines are separate because the restriction list must be
+ * built afresh for each pair of input sub-relations we consider, whereas
+ * the join list need only be computed once for any join RelOptInfo.
+ * The join list is fully determined by the set of rels making up the
+ * joinrel, so we should get the same results (up to ordering) from any
+ * candidate pair of sub-relations. But the restriction list is whatever
+ * is not handled in the sub-relations, so it depends on which
+ * sub-relations are considered.
+ *
+ * If a join clause from an input relation refers to base rels still not
+ * present in the joinrel, then it is still a join clause for the joinrel;
+ * we put it into the joininfo list for the joinrel. Otherwise,
+ * the clause is now a restrict clause for the joined relation, and we
+ * return it to the caller of build_joinrel_restrictlist() to be stored in
+ * join paths made from this pair of sub-relations. (It will not need to
+ * be considered further up the join tree.)
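+ * For example, when building the joinrel {A,B}, a clause "A.x = B.y" taken
+ * from either input's joininfo list becomes a restriction clause for the
+ * join, while "A.x = C.z" stays in the joinrel's joininfo list because C is
+ * not yet part of the join.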
+ *
+ * In many cases we will find the same RestrictInfos in both input
+ * relations' joinlists, so be careful to eliminate duplicates.
+ * Pointer equality should be a sufficient test for dups, since all
+ * the various joinlist entries ultimately refer to RestrictInfos
+ * pushed into them by distribute_restrictinfo_to_rels().
+ *
+ * 'joinrel' is a join relation node
+ * 'outer_rel' and 'inner_rel' are a pair of relations that can be joined
+ * to form joinrel.
+ *
+ * build_joinrel_restrictlist() returns a list of relevant restrictinfos,
+ * whereas build_joinrel_joinlist() stores its results in the joinrel's
+ * joininfo list. One or the other must accept each given clause!
+ *
+ * NB: Formerly, we made deep(!) copies of each input RestrictInfo to pass
+ * up to the join relation. I believe this is no longer necessary, because
+ * RestrictInfo nodes are no longer context-dependent. Instead, just include
+ * the original nodes in the lists made for the join relation.
+ */
+static List *
+build_joinrel_restrictlist(PlannerInfo *root,
+ RelOptInfo *joinrel,
+ RelOptInfo *outer_rel,
+ RelOptInfo *inner_rel)
+{
+ List *result;
+
+ /*
+ * Collect all the clauses that syntactically belong at this level,
+ * eliminating any duplicates (important since we will see many of the
+ * same clauses arriving from both input relations).
+ */
+ result = subbuild_joinrel_restrictlist(joinrel, outer_rel->joininfo, NIL);
+ result = subbuild_joinrel_restrictlist(joinrel, inner_rel->joininfo, result);
+
+ /*
+ * Add on any clauses derived from EquivalenceClasses. These cannot be
+ * redundant with the clauses in the joininfo lists, so don't bother
+ * checking.
+ */
+ result = list_concat(result,
+ generate_join_implied_equalities(root,
+ joinrel->relids,
+ outer_rel->relids,
+ inner_rel));
+
+ return result;
+}
+
+static void
+build_joinrel_joinlist(RelOptInfo *joinrel,
+ RelOptInfo *outer_rel,
+ RelOptInfo *inner_rel)
+{
+ List *result;
+
+ /*
+ * Collect all the clauses that syntactically belong above this level,
+ * eliminating any duplicates (important since we will see many of the
+ * same clauses arriving from both input relations).
+ */
+ result = subbuild_joinrel_joinlist(joinrel, outer_rel->joininfo, NIL);
+ result = subbuild_joinrel_joinlist(joinrel, inner_rel->joininfo, result);
+
+ joinrel->joininfo = result;
+}
+
+static List *
+subbuild_joinrel_restrictlist(RelOptInfo *joinrel,
+ List *joininfo_list,
+ List *new_restrictlist)
+{
+ ListCell *l;
+
+ foreach(l, joininfo_list)
+ {
+ RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
+
+ if (bms_is_subset(rinfo->required_relids, joinrel->relids))
+ {
+ /*
+ * This clause becomes a restriction clause for the joinrel, since
+ * it refers to no outside rels. Add it to the list, being
+ * careful to eliminate duplicates. (Since RestrictInfo nodes in
+ * different joinlists will have been multiply-linked rather than
+ * copied, pointer equality should be a sufficient test.)
+ */
+ new_restrictlist = list_append_unique_ptr(new_restrictlist, rinfo);
+ }
+ else
+ {
+ /*
+ * This clause is still a join clause at this level, so we ignore
+ * it in this routine.
+ */
+ }
+ }
+
+ return new_restrictlist;
+}
+
+static List *
+subbuild_joinrel_joinlist(RelOptInfo *joinrel,
+ List *joininfo_list,
+ List *new_joininfo)
+{
+ ListCell *l;
+
+ /* Expected to be called only for joins between parent relations. */
+ Assert(joinrel->reloptkind == RELOPT_JOINREL);
+
+ foreach(l, joininfo_list)
+ {
+ RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
+
+ if (bms_is_subset(rinfo->required_relids, joinrel->relids))
+ {
+ /*
+ * This clause becomes a restriction clause for the joinrel, since
+ * it refers to no outside rels. So we can ignore it in this
+ * routine.
+ */
+ }
+ else
+ {
+ /*
+ * This clause is still a join clause at this level, so add it to
+ * the new joininfo list, being careful to eliminate duplicates.
+ * (Since RestrictInfo nodes in different joinlists will have been
+ * multiply-linked rather than copied, pointer equality should be
+ * a sufficient test.)
+ */
+ new_joininfo = list_append_unique_ptr(new_joininfo, rinfo);
+ }
+ }
+
+ return new_joininfo;
+}
+
+
+/*
+ * fetch_upper_rel
+ * Build a RelOptInfo describing some post-scan/join query processing,
+ * or return a pre-existing one if somebody already built it.
+ *
+ * An "upper" relation is identified by an UpperRelationKind and a Relids set.
+ * The meaning of the Relids set is not specified here, and very likely will
+ * vary for different relation kinds.
+ *
+ * Most of the fields in an upper-level RelOptInfo are not used and are not
+ * set here (though makeNode should ensure they're zeroes). We basically only
+ * care about fields that are of interest to add_path() and set_cheapest().
+ */
+RelOptInfo *
+fetch_upper_rel(PlannerInfo *root, UpperRelationKind kind, Relids relids)
+{
+ RelOptInfo *upperrel;
+ ListCell *lc;
+
+ /*
+ * For the moment, our indexing data structure is just a List for each
+ * relation kind. If we ever get so many of one kind that this stops
+ * working well, we can improve it. No code outside this function should
+ * assume anything about how to find a particular upperrel.
+ */
+
+ /* If we already made this upperrel for the query, return it */
+ foreach(lc, root->upper_rels[kind])
+ {
+ upperrel = (RelOptInfo *) lfirst(lc);
+
+ if (bms_equal(upperrel->relids, relids))
+ return upperrel;
+ }
+
+ upperrel = makeNode(RelOptInfo);
+ upperrel->reloptkind = RELOPT_UPPER_REL;
+ upperrel->relids = bms_copy(relids);
+
+ /* cheap startup cost is interesting iff not all tuples to be retrieved */
+ upperrel->consider_startup = (root->tuple_fraction > 0);
+ upperrel->consider_param_startup = false;
+ upperrel->consider_parallel = false; /* might get changed later */
+ upperrel->reltarget = create_empty_pathtarget();
+ upperrel->pathlist = NIL;
+ upperrel->cheapest_startup_path = NULL;
+ upperrel->cheapest_total_path = NULL;
+ upperrel->cheapest_unique_path = NULL;
+ upperrel->cheapest_parameterized_paths = NIL;
+
+ root->upper_rels[kind] = lappend(root->upper_rels[kind], upperrel);
+
+ return upperrel;
+}
+
+
+/*
+ * find_childrel_parents
+ * Compute the set of parent relids of an appendrel child rel.
+ *
+ * Since appendrels can be nested, a child could have multiple levels of
+ * appendrel ancestors. This function computes a Relids set of all the
+ * parent relation IDs.
+ */
+Relids
+find_childrel_parents(PlannerInfo *root, RelOptInfo *rel)
+{
+ Relids result = NULL;
+
+ Assert(rel->reloptkind == RELOPT_OTHER_MEMBER_REL);
+ Assert(rel->relid > 0 && rel->relid < root->simple_rel_array_size);
+
+ do
+ {
+ AppendRelInfo *appinfo = root->append_rel_array[rel->relid];
+ Index prelid = appinfo->parent_relid;
+
+ result = bms_add_member(result, prelid);
+
+ /* traverse up to the parent rel, loop if it's also a child rel */
+ rel = find_base_rel(root, prelid);
+ } while (rel->reloptkind == RELOPT_OTHER_MEMBER_REL);
+
+ Assert(rel->reloptkind == RELOPT_BASEREL);
+
+ return result;
+}
+
+
+/*
+ * get_baserel_parampathinfo
+ * Get the ParamPathInfo for a parameterized path for a base relation,
+ * constructing one if we don't have one already.
+ *
+ * This centralizes estimating the rowcounts for parameterized paths.
+ * We need to cache those to be sure we use the same rowcount for all paths
+ * of the same parameterization for a given rel. This is also a convenient
+ * place to determine which movable join clauses the parameterized path will
+ * be responsible for evaluating.
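+ *
+ * For example, for a scan of B parameterized by A, a join clause such as
+ * "B.y = A.x" can be enforced at the scan (typically as an index qual) and
+ * so appears in the ParamPathInfo's ppi_clauses rather than being applied
+ * later at the join.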
+ */
+ParamPathInfo *
+get_baserel_parampathinfo(PlannerInfo *root, RelOptInfo *baserel,
+ Relids required_outer)
+{
+ ParamPathInfo *ppi;
+ Relids joinrelids;
+ List *pclauses;
+ double rows;
+ ListCell *lc;
+
+ /* If rel has LATERAL refs, every path for it should account for them */
+ Assert(bms_is_subset(baserel->lateral_relids, required_outer));
+
+ /* Unparameterized paths have no ParamPathInfo */
+ if (bms_is_empty(required_outer))
+ return NULL;
+
+ Assert(!bms_overlap(baserel->relids, required_outer));
+
+ /* If we already have a PPI for this parameterization, just return it */
+ if ((ppi = find_param_path_info(baserel, required_outer)))
+ return ppi;
+
+ /*
+ * Identify all joinclauses that are movable to this base rel given this
+ * parameterization.
+ */
+ joinrelids = bms_union(baserel->relids, required_outer);
+ pclauses = NIL;
+ foreach(lc, baserel->joininfo)
+ {
+ RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
+
+ if (join_clause_is_movable_into(rinfo,
+ baserel->relids,
+ joinrelids))
+ pclauses = lappend(pclauses, rinfo);
+ }
+
+ /*
+ * Add in joinclauses generated by EquivalenceClasses, too. (These
+ * necessarily satisfy join_clause_is_movable_into.)
+ */
+ pclauses = list_concat(pclauses,
+ generate_join_implied_equalities(root,
+ joinrelids,
+ required_outer,
+ baserel));
+
+ /* Estimate the number of rows returned by the parameterized scan */
+ rows = get_parameterized_baserel_size(root, baserel, pclauses);
+
+ /* And now we can build the ParamPathInfo */
+ ppi = makeNode(ParamPathInfo);
+ ppi->ppi_req_outer = required_outer;
+ ppi->ppi_rows = rows;
+ ppi->ppi_clauses = pclauses;
+ baserel->ppilist = lappend(baserel->ppilist, ppi);
+
+ return ppi;
+}
+
+/*
+ * get_joinrel_parampathinfo
+ * Get the ParamPathInfo for a parameterized path for a join relation,
+ * constructing one if we don't have one already.
+ *
+ * This centralizes estimating the rowcounts for parameterized paths.
+ * We need to cache those to be sure we use the same rowcount for all paths
+ * of the same parameterization for a given rel. This is also a convenient
+ * place to determine which movable join clauses the parameterized path will
+ * be responsible for evaluating.
+ *
+ * outer_path and inner_path are a pair of input paths that can be used to
+ * construct the join, and restrict_clauses is the list of regular join
+ * clauses (including clauses derived from EquivalenceClasses) that must be
+ * applied at the join node when using these inputs.
+ *
+ * Unlike the situation for base rels, the set of movable join clauses to be
+ * enforced at a join varies with the selected pair of input paths, so we
+ * must calculate that and pass it back, even if we already have a matching
+ * ParamPathInfo. We handle this by adding any clauses moved down to this
+ * join to *restrict_clauses, which is an in/out parameter. (The addition
+ * is done in such a way as to not modify the passed-in List structure.)
+ *
+ * Note: when considering a nestloop join, the caller must have removed from
+ * restrict_clauses any movable clauses that are themselves scheduled to be
+ * pushed into the right-hand path. We do not do that here since it's
+ * unnecessary for other join types.
+ */
+ParamPathInfo *
+get_joinrel_parampathinfo(PlannerInfo *root, RelOptInfo *joinrel,
+ Path *outer_path,
+ Path *inner_path,
+ SpecialJoinInfo *sjinfo,
+ Relids required_outer,
+ List **restrict_clauses)
+{
+ ParamPathInfo *ppi;
+ Relids join_and_req;
+ Relids outer_and_req;
+ Relids inner_and_req;
+ List *pclauses;
+ List *eclauses;
+ List *dropped_ecs;
+ double rows;
+ ListCell *lc;
+
+ /* If rel has LATERAL refs, every path for it should account for them */
+ Assert(bms_is_subset(joinrel->lateral_relids, required_outer));
+
+ /* Unparameterized paths have no ParamPathInfo or extra join clauses */
+ if (bms_is_empty(required_outer))
+ return NULL;
+
+ Assert(!bms_overlap(joinrel->relids, required_outer));
+
+ /*
+ * Identify all joinclauses that are movable to this join rel given this
+ * parameterization. These are the clauses that are movable into this
+ * join, but not movable into either input path. Treat an unparameterized
+ * input path as not accepting parameterized clauses (because it won't,
+ * per the shortcut exit above), even though the joinclause movement rules
+ * might allow the same clauses to be moved into a parameterized path for
+ * that rel.
+ */
+ join_and_req = bms_union(joinrel->relids, required_outer);
+ if (outer_path->param_info)
+ outer_and_req = bms_union(outer_path->parent->relids,
+ PATH_REQ_OUTER(outer_path));
+ else
+ outer_and_req = NULL; /* outer path does not accept parameters */
+ if (inner_path->param_info)
+ inner_and_req = bms_union(inner_path->parent->relids,
+ PATH_REQ_OUTER(inner_path));
+ else
+ inner_and_req = NULL; /* inner path does not accept parameters */
+
+ pclauses = NIL;
+ foreach(lc, joinrel->joininfo)
+ {
+ RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
+
+ if (join_clause_is_movable_into(rinfo,
+ joinrel->relids,
+ join_and_req) &&
+ !join_clause_is_movable_into(rinfo,
+ outer_path->parent->relids,
+ outer_and_req) &&
+ !join_clause_is_movable_into(rinfo,
+ inner_path->parent->relids,
+ inner_and_req))
+ pclauses = lappend(pclauses, rinfo);
+ }
+
+ /* Consider joinclauses generated by EquivalenceClasses, too */
+ eclauses = generate_join_implied_equalities(root,
+ join_and_req,
+ required_outer,
+ joinrel);
+ /* We only want ones that aren't movable to lower levels */
+ dropped_ecs = NIL;
+ foreach(lc, eclauses)
+ {
+ RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
+
+ /*
+ * In principle, join_clause_is_movable_into() should accept anything
+ * returned by generate_join_implied_equalities(); but because its
+ * analysis is only approximate, sometimes it doesn't. So we
+ * currently cannot use this Assert; instead just assume it's okay to
+ * apply the joinclause at this level.
+ */
+#ifdef NOT_USED
+ Assert(join_clause_is_movable_into(rinfo,
+ joinrel->relids,
+ join_and_req));
+#endif
+ if (join_clause_is_movable_into(rinfo,
+ outer_path->parent->relids,
+ outer_and_req))
+ continue; /* drop if movable into LHS */
+ if (join_clause_is_movable_into(rinfo,
+ inner_path->parent->relids,
+ inner_and_req))
+ {
+ /* drop if movable into RHS, but remember EC for use below */
+ Assert(rinfo->left_ec == rinfo->right_ec);
+ dropped_ecs = lappend(dropped_ecs, rinfo->left_ec);
+ continue;
+ }
+ pclauses = lappend(pclauses, rinfo);
+ }
+
+ /*
+ * EquivalenceClasses are harder to deal with than we could wish, because a
+ * given EC can generate different clauses depending on context. Suppose we
+ * have an EC {X.X, Y.Y, Z.Z} where X and Y are the
+ * LHS and RHS of the current join and Z is in required_outer, and further
+ * suppose that the inner_path is parameterized by both X and Z. The code
+ * above will have produced either Z.Z = X.X or Z.Z = Y.Y from that EC,
+ * and in the latter case will have discarded it as being movable into the
+ * RHS. However, the EC machinery might have produced either Y.Y = X.X or
+ * Y.Y = Z.Z as the EC enforcement clause within the inner_path; it will
+ * not have produced both, and we can't readily tell from here which one
+ * it did pick. If we add no clause to this join, we'll end up with
+ * insufficient enforcement of the EC; either Z.Z or X.X will fail to be
+ * constrained to be equal to the other members of the EC. (When we come
+ * to join Z to this X/Y path, we will certainly drop whichever EC clause
+ * is generated at that join, so this omission won't get fixed later.)
+ *
+ * To handle this, for each EC we discarded such a clause from, try to
+ * generate a clause connecting the required_outer rels to the join's LHS
+ * ("Z.Z = X.X" in the terms of the above example). If successful, and if
+ * the clause can't be moved to the LHS, add it to the current join's
+ * restriction clauses. (If an EC cannot generate such a clause then it
+ * has nothing that needs to be enforced here, while if the clause can be
+ * moved into the LHS then it should have been enforced within that path.)
+ *
+ * Note that we don't need similar processing for ECs whose clause was
+ * considered to be movable into the LHS, because the LHS can't refer to
+ * the RHS so there is no comparable ambiguity about what it might
+ * actually be enforcing internally.
+ */
+ if (dropped_ecs)
+ {
+ Relids real_outer_and_req;
+
+ real_outer_and_req = bms_union(outer_path->parent->relids,
+ required_outer);
+ eclauses =
+ generate_join_implied_equalities_for_ecs(root,
+ dropped_ecs,
+ real_outer_and_req,
+ required_outer,
+ outer_path->parent);
+ foreach(lc, eclauses)
+ {
+ RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
+
+ /* As above, can't quite assert this here */
+#ifdef NOT_USED
+ Assert(join_clause_is_movable_into(rinfo,
+ outer_path->parent->relids,
+ real_outer_and_req));
+#endif
+ if (!join_clause_is_movable_into(rinfo,
+ outer_path->parent->relids,
+ outer_and_req))
+ pclauses = lappend(pclauses, rinfo);
+ }
+ }
+
+ /*
+ * Now, attach the identified moved-down clauses to the caller's
+ * restrict_clauses list. By using list_concat in this order, we leave
+ * the original list structure of restrict_clauses undamaged.
+ */
+ *restrict_clauses = list_concat(pclauses, *restrict_clauses);
+
+ /* If we already have a PPI for this parameterization, just return it */
+ if ((ppi = find_param_path_info(joinrel, required_outer)))
+ return ppi;
+
+ /* Estimate the number of rows returned by the parameterized join */
+ rows = get_parameterized_joinrel_size(root, joinrel,
+ outer_path,
+ inner_path,
+ sjinfo,
+ *restrict_clauses);
+
+ /*
+ * And now we can build the ParamPathInfo. No point in saving the
+ * input-pair-dependent clause list, though.
+ *
+ * Note: in GEQO mode, we'll be called in a temporary memory context, but
+ * the joinrel structure is there too, so no problem.
+ */
+ ppi = makeNode(ParamPathInfo);
+ ppi->ppi_req_outer = required_outer;
+ ppi->ppi_rows = rows;
+ ppi->ppi_clauses = NIL;
+ joinrel->ppilist = lappend(joinrel->ppilist, ppi);
+
+ return ppi;
+}
+
+/*
+ * get_appendrel_parampathinfo
+ * Get the ParamPathInfo for a parameterized path for an append relation.
+ *
+ * For an append relation, the rowcount estimate will just be the sum of
+ * the estimates for its children. However, we still need a ParamPathInfo
+ * to flag the fact that the path requires parameters. So this just creates
+ * a suitable struct with zero ppi_rows (and no ppi_clauses either, since
+ * the Append node isn't responsible for checking quals).
+ */
+ParamPathInfo *
+get_appendrel_parampathinfo(RelOptInfo *appendrel, Relids required_outer)
+{
+ ParamPathInfo *ppi;
+
+ /* If rel has LATERAL refs, every path for it should account for them */
+ Assert(bms_is_subset(appendrel->lateral_relids, required_outer));
+
+ /* Unparameterized paths have no ParamPathInfo */
+ if (bms_is_empty(required_outer))
+ return NULL;
+
+ Assert(!bms_overlap(appendrel->relids, required_outer));
+
+ /* If we already have a PPI for this parameterization, just return it */
+ if ((ppi = find_param_path_info(appendrel, required_outer)))
+ return ppi;
+
+ /* Else build the ParamPathInfo */
+ ppi = makeNode(ParamPathInfo);
+ ppi->ppi_req_outer = required_outer;
+ ppi->ppi_rows = 0;
+ ppi->ppi_clauses = NIL;
+ appendrel->ppilist = lappend(appendrel->ppilist, ppi);
+
+ return ppi;
+}
+
+/*
+ * find_param_path_info
+ * Returns the ParamPathInfo for the parameterization given by required_outer,
+ * if one already exists in the given rel; otherwise returns NULL.
+ */
+ParamPathInfo *
+find_param_path_info(RelOptInfo *rel, Relids required_outer)
+{
+ ListCell *lc;
+
+ foreach(lc, rel->ppilist)
+ {
+ ParamPathInfo *ppi = (ParamPathInfo *) lfirst(lc);
+
+ if (bms_equal(ppi->ppi_req_outer, required_outer))
+ return ppi;
+ }
+
+ return NULL;
+}
+
+/*
+ * build_joinrel_partition_info
+ * Checks if the two relations being joined can use partitionwise join
+ * and if yes, initialize partitioning information of the resulting
+ * partitioned join relation.
+ */
+static void
+build_joinrel_partition_info(RelOptInfo *joinrel, RelOptInfo *outer_rel,
+ RelOptInfo *inner_rel, List *restrictlist,
+ JoinType jointype)
+{
+ PartitionScheme part_scheme;
+
+ /* Nothing to do if partitionwise join technique is disabled. */
+ if (!enable_partitionwise_join)
+ {
+ Assert(!IS_PARTITIONED_REL(joinrel));
+ return;
+ }
+
+ /*
+ * We can only consider this join as an input to further partitionwise
+ * joins if (a) the input relations are partitioned and have
+ * consider_partitionwise_join=true, (b) the partition schemes match, and
+ * (c) we can identify an equi-join between the partition keys. Note that
+ * if it were possible for have_partkey_equi_join to return different
+ * answers for the same joinrel depending on which join ordering we try
+ * first, this logic would break. That shouldn't happen, though, because
+ * of the way the query planner deduces implied equalities and reorders
+ * the joins. Please see optimizer/README for details.
+ */
+ if (outer_rel->part_scheme == NULL || inner_rel->part_scheme == NULL ||
+ !outer_rel->consider_partitionwise_join ||
+ !inner_rel->consider_partitionwise_join ||
+ outer_rel->part_scheme != inner_rel->part_scheme ||
+ !have_partkey_equi_join(joinrel, outer_rel, inner_rel,
+ jointype, restrictlist))
+ {
+ Assert(!IS_PARTITIONED_REL(joinrel));
+ return;
+ }
+
+ part_scheme = outer_rel->part_scheme;
+
+ /*
+ * This function will be called only once for each joinrel, hence it
+ * should not have partitioning fields filled yet.
+ */
+ Assert(!joinrel->part_scheme && !joinrel->partexprs &&
+ !joinrel->nullable_partexprs && !joinrel->part_rels &&
+ !joinrel->boundinfo);
+
+ /*
+ * If the join relation is partitioned, it uses the same partitioning
+ * scheme as the joining relations.
+ *
+ * Note: we calculate the partition bounds, number of partitions, and
+ * child-join relations of the join relation in try_partitionwise_join().
+ */
+ joinrel->part_scheme = part_scheme;
+ set_joinrel_partition_key_exprs(joinrel, outer_rel, inner_rel, jointype);
+
+ /*
+ * Set the consider_partitionwise_join flag.
+ */
+ Assert(outer_rel->consider_partitionwise_join);
+ Assert(inner_rel->consider_partitionwise_join);
+ joinrel->consider_partitionwise_join = true;
+}
+
+/*
+ * have_partkey_equi_join
+ *
+ * Returns true if, for every partition key, the restrictlist contains an
+ * equi-join condition between the matching partition keys of the two
+ * relations being joined.
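+ *
+ * For illustration only (hypothetical tables): if t1 and t2 are both
+ * partitioned on a single key (a) under the same PartitionScheme, a
+ * restrictlist containing t1.a = t2.a with a matching operator family
+ * satisfies this for the sole partition key.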
+ */
+static bool
+have_partkey_equi_join(RelOptInfo *joinrel,
+ RelOptInfo *rel1, RelOptInfo *rel2,
+ JoinType jointype, List *restrictlist)
+{
+ PartitionScheme part_scheme = rel1->part_scheme;
+ ListCell *lc;
+ int cnt_pks;
+ bool pk_has_clause[PARTITION_MAX_KEYS];
+ bool strict_op;
+
+ /*
+ * This function must only be called when the joined relations have the
+ * same partitioning scheme.
+ */
+ Assert(rel1->part_scheme == rel2->part_scheme);
+ Assert(part_scheme);
+
+ memset(pk_has_clause, 0, sizeof(pk_has_clause));
+ foreach(lc, restrictlist)
+ {
+ RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc);
+ OpExpr *opexpr;
+ Expr *expr1;
+ Expr *expr2;
+ int ipk1;
+ int ipk2;
+
+ /* If processing an outer join, only use its own join clauses. */
+ if (IS_OUTER_JOIN(jointype) &&
+ RINFO_IS_PUSHED_DOWN(rinfo, joinrel->relids))
+ continue;
+
+ /* Skip clauses that cannot be used for a join. */
+ if (!rinfo->can_join)
+ continue;
+
+ /* Skip clauses which are not equality conditions. */
+ if (!rinfo->mergeopfamilies && !OidIsValid(rinfo->hashjoinoperator))
+ continue;
+
+ /* Should be OK to assume it's an OpExpr. */
+ opexpr = castNode(OpExpr, rinfo->clause);
+
+ /* Match the operands to the relation. */
+ if (bms_is_subset(rinfo->left_relids, rel1->relids) &&
+ bms_is_subset(rinfo->right_relids, rel2->relids))
+ {
+ expr1 = linitial(opexpr->args);
+ expr2 = lsecond(opexpr->args);
+ }
+ else if (bms_is_subset(rinfo->left_relids, rel2->relids) &&
+ bms_is_subset(rinfo->right_relids, rel1->relids))
+ {
+ expr1 = lsecond(opexpr->args);
+ expr2 = linitial(opexpr->args);
+ }
+ else
+ continue;
+
+ /*
+ * Now we need to know whether the join operator is strict; see
+ * comments in pathnodes.h.
+ */
+ strict_op = op_strict(opexpr->opno);
+
+ /*
+ * Only clauses referencing the partition keys are useful for
+ * partitionwise join.
+ */
+ ipk1 = match_expr_to_partition_keys(expr1, rel1, strict_op);
+ if (ipk1 < 0)
+ continue;
+ ipk2 = match_expr_to_partition_keys(expr2, rel2, strict_op);
+ if (ipk2 < 0)
+ continue;
+
+ /*
+ * If the clause refers to keys at different ordinal positions, it
+ * cannot be used for partitionwise join.
+ */
+ if (ipk1 != ipk2)
+ continue;
+
+ /*
+ * The clause allows partitionwise join only if it uses the same
+ * operator family as that specified by the partition key.
+ */
+ if (rel1->part_scheme->strategy == PARTITION_STRATEGY_HASH)
+ {
+ if (!OidIsValid(rinfo->hashjoinoperator) ||
+ !op_in_opfamily(rinfo->hashjoinoperator,
+ part_scheme->partopfamily[ipk1]))
+ continue;
+ }
+ else if (!list_member_oid(rinfo->mergeopfamilies,
+ part_scheme->partopfamily[ipk1]))
+ continue;
+
+ /* Mark the partition key as having an equi-join clause. */
+ pk_has_clause[ipk1] = true;
+ }
+
+ /* Check whether every partition key has an equi-join condition. */
+ for (cnt_pks = 0; cnt_pks < part_scheme->partnatts; cnt_pks++)
+ {
+ if (!pk_has_clause[cnt_pks])
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * match_expr_to_partition_keys
+ *
+ * Tries to match an expression to one of the nullable or non-nullable
+ * partition keys of "rel". Returns the matched key's ordinal position,
+ * or -1 if the expression could not be matched to any of the keys.
+ *
+ * strict_op must be true if the expression will be compared with the
+ * partition key using a strict operator. This allows us to consider
+ * nullable as well as nonnullable partition keys.
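+ *
+ * For illustration (an assumed setup): for a rel partitioned on (a), the
+ * expression "a" (possibly wrapped in RelabelType) matches key 0 via
+ * partexprs; with strict_op true, an expression found only in
+ * nullable_partexprs[0] matches as well.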
+ */
+static int
+match_expr_to_partition_keys(Expr *expr, RelOptInfo *rel, bool strict_op)
+{
+ int cnt;
+
+ /* This function should be called only for partitioned relations. */
+ Assert(rel->part_scheme);
+ Assert(rel->partexprs);
+ Assert(rel->nullable_partexprs);
+
+ /* Remove any relabel decorations. */
+ while (IsA(expr, RelabelType))
+ expr = (Expr *) (castNode(RelabelType, expr))->arg;
+
+ for (cnt = 0; cnt < rel->part_scheme->partnatts; cnt++)
+ {
+ ListCell *lc;
+
+ /* We can always match to the non-nullable partition keys. */
+ foreach(lc, rel->partexprs[cnt])
+ {
+ if (equal(lfirst(lc), expr))
+ return cnt;
+ }
+
+ if (!strict_op)
+ continue;
+
+ /*
+ * If it's a strict join operator then a NULL partition key on one
+ * side will not join to any partition key on the other side, and in
+ * particular such a row can't join to a row from a different
+ * partition on the other side. So, it's okay to search the nullable
+ * partition keys as well.
+ */
+ foreach(lc, rel->nullable_partexprs[cnt])
+ {
+ if (equal(lfirst(lc), expr))
+ return cnt;
+ }
+ }
+
+ return -1;
+}
+
+/*
+ * set_joinrel_partition_key_exprs
+ * Initialize partition key expressions for a partitioned joinrel.
+ */
+static void
+set_joinrel_partition_key_exprs(RelOptInfo *joinrel,
+ RelOptInfo *outer_rel, RelOptInfo *inner_rel,
+ JoinType jointype)
+{
+ PartitionScheme part_scheme = joinrel->part_scheme;
+ int partnatts = part_scheme->partnatts;
+
+ joinrel->partexprs = (List **) palloc0(sizeof(List *) * partnatts);
+ joinrel->nullable_partexprs =
+ (List **) palloc0(sizeof(List *) * partnatts);
+
+ /*
+ * The joinrel's partition expressions are the same as those of the input
+ * rels, but we must properly classify them as nullable or not in the
+ * joinrel's output. (Also, we add some more partition expressions if
+ * it's a FULL JOIN.)
+ */
+ for (int cnt = 0; cnt < partnatts; cnt++)
+ {
+ /* mark these const to enforce that we copy them properly */
+ const List *outer_expr = outer_rel->partexprs[cnt];
+ const List *outer_null_expr = outer_rel->nullable_partexprs[cnt];
+ const List *inner_expr = inner_rel->partexprs[cnt];
+ const List *inner_null_expr = inner_rel->nullable_partexprs[cnt];
+ List *partexpr = NIL;
+ List *nullable_partexpr = NIL;
+ ListCell *lc;
+
+ switch (jointype)
+ {
+ /*
+ * A join relation resulting from an INNER join may be
+ * regarded as partitioned by either of the inner and outer
+ * relation keys. For example, A INNER JOIN B ON A.a = B.b
+ * can be regarded as partitioned on either A.a or B.b. So we
+ * add both keys to the joinrel's partexpr lists. However,
+ * anything that was already nullable still has to be treated
+ * as nullable.
+ */
+ case JOIN_INNER:
+ partexpr = list_concat_copy(outer_expr, inner_expr);
+ nullable_partexpr = list_concat_copy(outer_null_expr,
+ inner_null_expr);
+ break;
+
+ /*
+ * A join relation resulting from a SEMI or ANTI join may be
+ * regarded as partitioned by the outer relation keys. The
+ * inner relation's keys are no longer interesting; since they
+ * aren't visible in the join output, nothing could join to
+ * them.
+ */
+ case JOIN_SEMI:
+ case JOIN_ANTI:
+ partexpr = list_copy(outer_expr);
+ nullable_partexpr = list_copy(outer_null_expr);
+ break;
+
+ /*
+ * A join relation resulting from a LEFT OUTER JOIN likewise
+ * may be regarded as partitioned on the (non-nullable) outer
+ * relation keys. The inner (nullable) relation keys are okay
+ * as partition keys for further joins as long as they involve
+ * strict join operators.
+ */
+ case JOIN_LEFT:
+ partexpr = list_copy(outer_expr);
+ nullable_partexpr = list_concat_copy(inner_expr,
+ outer_null_expr);
+ nullable_partexpr = list_concat(nullable_partexpr,
+ inner_null_expr);
+ break;
+
+ /*
+ * For FULL OUTER JOINs, both relations are nullable, so the
+ * resulting join relation may be regarded as partitioned on
+ * either the inner or the outer relation's keys, but only for
+ * joins that involve strict join operators.
+ */
+ case JOIN_FULL:
+ nullable_partexpr = list_concat_copy(outer_expr,
+ inner_expr);
+ nullable_partexpr = list_concat(nullable_partexpr,
+ outer_null_expr);
+ nullable_partexpr = list_concat(nullable_partexpr,
+ inner_null_expr);
+
+ /*
+ * Also add CoalesceExprs corresponding to each possible
+ * full-join output variable (that is, left side coalesced to
+ * right side), so that we can match equijoin expressions
+ * using those variables. We really only need these for
+ * columns merged by JOIN USING, and only with the pairs of
+ * input items that correspond to the data structures that
+ * parse analysis would build for such variables. But it's
+ * hard to tell which those are, so just make all the pairs.
+ * Extra items in the nullable_partexprs list won't cause big
+ * problems. (It's possible that such items will get matched
+ * to user-written COALESCEs, but it should still be valid to
+ * partition on those, since they're going to be either the
+ * partition column or NULL; it's the same argument as for
+ * partitionwise nesting of any outer join.) We assume no
+ * type coercions are needed to make the coalesce expressions,
+ * since columns of different types won't have gotten
+ * classified as the same PartitionScheme.
+ */
+ foreach(lc, list_concat_copy(outer_expr, outer_null_expr))
+ {
+ Node *larg = (Node *) lfirst(lc);
+ ListCell *lc2;
+
+ foreach(lc2, list_concat_copy(inner_expr, inner_null_expr))
+ {
+ Node *rarg = (Node *) lfirst(lc2);
+ CoalesceExpr *c = makeNode(CoalesceExpr);
+
+ c->coalescetype = exprType(larg);
+ c->coalescecollid = exprCollation(larg);
+ c->args = list_make2(larg, rarg);
+ c->location = -1;
+ nullable_partexpr = lappend(nullable_partexpr, c);
+ }
+ }
+ break;
+
+ default:
+ elog(ERROR, "unrecognized join type: %d", (int) jointype);
+ }
+
+ joinrel->partexprs[cnt] = partexpr;
+ joinrel->nullable_partexprs[cnt] = nullable_partexpr;
+ }
+}
+
+/*
+ * build_child_join_reltarget
+ * Set up a child-join relation's reltarget from a parent-join relation.
+ */
+static void
+build_child_join_reltarget(PlannerInfo *root,
+ RelOptInfo *parentrel,
+ RelOptInfo *childrel,
+ int nappinfos,
+ AppendRelInfo **appinfos)
+{
+ /* Build the targetlist */
+ childrel->reltarget->exprs = (List *)
+ adjust_appendrel_attrs(root,
+ (Node *) parentrel->reltarget->exprs,
+ nappinfos, appinfos);
+
+ /* Set the cost and width fields */
+ childrel->reltarget->cost.startup = parentrel->reltarget->cost.startup;
+ childrel->reltarget->cost.per_tuple = parentrel->reltarget->cost.per_tuple;
+ childrel->reltarget->width = parentrel->reltarget->width;
+}
diff --git a/src/backend/optimizer/util/restrictinfo.c b/src/backend/optimizer/util/restrictinfo.c
new file mode 100644
index 0000000..e7764f4
--- /dev/null
+++ b/src/backend/optimizer/util/restrictinfo.c
@@ -0,0 +1,655 @@
+/*-------------------------------------------------------------------------
+ *
+ * restrictinfo.c
+ * RestrictInfo node manipulation routines.
+ *
+ * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ * src/backend/optimizer/util/restrictinfo.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "nodes/makefuncs.h"
+#include "nodes/nodeFuncs.h"
+#include "optimizer/clauses.h"
+#include "optimizer/optimizer.h"
+#include "optimizer/restrictinfo.h"
+
+
+static RestrictInfo *make_restrictinfo_internal(PlannerInfo *root,
+ Expr *clause,
+ Expr *orclause,
+ bool is_pushed_down,
+ bool outerjoin_delayed,
+ bool pseudoconstant,
+ Index security_level,
+ Relids required_relids,
+ Relids outer_relids,
+ Relids nullable_relids);
+static Expr *make_sub_restrictinfos(PlannerInfo *root,
+ Expr *clause,
+ bool is_pushed_down,
+ bool outerjoin_delayed,
+ bool pseudoconstant,
+ Index security_level,
+ Relids required_relids,
+ Relids outer_relids,
+ Relids nullable_relids);
+
+
+/*
+ * make_restrictinfo
+ *
+ * Build a RestrictInfo node containing the given subexpression.
+ *
+ * The is_pushed_down, outerjoin_delayed, and pseudoconstant flags for the
+ * RestrictInfo must be supplied by the caller, as well as the correct values
+ * for security_level, outer_relids, and nullable_relids.
+ * required_relids can be NULL, in which case it defaults to the actual clause
+ * contents (i.e., clause_relids).
+ *
+ * We initialize fields that depend only on the given subexpression, leaving
+ * others that depend on context (or may never be needed at all) to be filled
+ * later.
+ */
+RestrictInfo *
+make_restrictinfo(PlannerInfo *root,
+ Expr *clause,
+ bool is_pushed_down,
+ bool outerjoin_delayed,
+ bool pseudoconstant,
+ Index security_level,
+ Relids required_relids,
+ Relids outer_relids,
+ Relids nullable_relids)
+{
+ /*
+ * If it's an OR clause, build a modified copy with RestrictInfos inserted
+ * above each subclause of the top-level AND/OR structure.
+ */
+ if (is_orclause(clause))
+ return (RestrictInfo *) make_sub_restrictinfos(root,
+ clause,
+ is_pushed_down,
+ outerjoin_delayed,
+ pseudoconstant,
+ security_level,
+ required_relids,
+ outer_relids,
+ nullable_relids);
+
+ /* Shouldn't be an AND clause, else AND/OR flattening messed up */
+ Assert(!is_andclause(clause));
+
+ return make_restrictinfo_internal(root,
+ clause,
+ NULL,
+ is_pushed_down,
+ outerjoin_delayed,
+ pseudoconstant,
+ security_level,
+ required_relids,
+ outer_relids,
+ nullable_relids);
+}
+
+/*
+ * make_restrictinfo_internal
+ *
+ * Common code for the main entry points and the recursive cases.
+ */
+static RestrictInfo *
+make_restrictinfo_internal(PlannerInfo *root,
+ Expr *clause,
+ Expr *orclause,
+ bool is_pushed_down,
+ bool outerjoin_delayed,
+ bool pseudoconstant,
+ Index security_level,
+ Relids required_relids,
+ Relids outer_relids,
+ Relids nullable_relids)
+{
+ RestrictInfo *restrictinfo = makeNode(RestrictInfo);
+
+ restrictinfo->clause = clause;
+ restrictinfo->orclause = orclause;
+ restrictinfo->is_pushed_down = is_pushed_down;
+ restrictinfo->outerjoin_delayed = outerjoin_delayed;
+ restrictinfo->pseudoconstant = pseudoconstant;
+ restrictinfo->can_join = false; /* may get set below */
+ restrictinfo->security_level = security_level;
+ restrictinfo->outer_relids = outer_relids;
+ restrictinfo->nullable_relids = nullable_relids;
+
+ /*
+ * If it's potentially delayable by lower-level security quals, figure out
+ * whether it's leakproof. We can skip testing this for level-zero quals,
+ * since they would never get delayed on security grounds anyway.
+ */
+ if (security_level > 0)
+ restrictinfo->leakproof = !contain_leaked_vars((Node *) clause);
+ else
+ restrictinfo->leakproof = false; /* really, "don't know" */
+
+ /*
+ * Mark volatility as unknown. The contain_volatile_functions function
+ * will determine if there are any volatile functions when called for the
+ * first time with this RestrictInfo.
+ */
+ restrictinfo->has_volatile = VOLATILITY_UNKNOWN;
+
+ /*
+ * If it's a binary opclause, set up left/right relids info. In any case
+ * set up the total clause relids info.
+ */
+ if (is_opclause(clause) && list_length(((OpExpr *) clause)->args) == 2)
+ {
+ restrictinfo->left_relids = pull_varnos(root, get_leftop(clause));
+ restrictinfo->right_relids = pull_varnos(root, get_rightop(clause));
+
+ restrictinfo->clause_relids = bms_union(restrictinfo->left_relids,
+ restrictinfo->right_relids);
+
+ /*
+ * Does it look like a normal join clause, i.e., a binary operator
+ * relating expressions that come from distinct relations? If so we
+ * might be able to use it in a join algorithm. Note that this is a
+ * purely syntactic test that is made regardless of context.
+ */
+ if (!bms_is_empty(restrictinfo->left_relids) &&
+ !bms_is_empty(restrictinfo->right_relids) &&
+ !bms_overlap(restrictinfo->left_relids,
+ restrictinfo->right_relids))
+ {
+ restrictinfo->can_join = true;
+ /* pseudoconstant should certainly not be true */
+ Assert(!restrictinfo->pseudoconstant);
+ }
+ }
+ else
+ {
+ /* Not a binary opclause, so mark left/right relid sets as empty */
+ restrictinfo->left_relids = NULL;
+ restrictinfo->right_relids = NULL;
+ /* and get the total relid set the hard way */
+ restrictinfo->clause_relids = pull_varnos(root, (Node *) clause);
+ }
+
+ /* required_relids defaults to clause_relids */
+ if (required_relids != NULL)
+ restrictinfo->required_relids = required_relids;
+ else
+ restrictinfo->required_relids = restrictinfo->clause_relids;
+
+ /*
+ * Fill in all the cacheable fields with "not yet set" markers. None of
+ * these will be computed until/unless needed. Note in particular that we
+ * don't mark a binary opclause as mergejoinable or hashjoinable here;
+ * that happens only if it appears in the right context (top level of a
+ * joinclause list).
+ */
+ restrictinfo->parent_ec = NULL;
+
+ restrictinfo->eval_cost.startup = -1;
+ restrictinfo->norm_selec = -1;
+ restrictinfo->outer_selec = -1;
+
+ restrictinfo->mergeopfamilies = NIL;
+
+ restrictinfo->left_ec = NULL;
+ restrictinfo->right_ec = NULL;
+ restrictinfo->left_em = NULL;
+ restrictinfo->right_em = NULL;
+ restrictinfo->scansel_cache = NIL;
+
+ restrictinfo->outer_is_left = false;
+
+ restrictinfo->hashjoinoperator = InvalidOid;
+
+ restrictinfo->left_bucketsize = -1;
+ restrictinfo->right_bucketsize = -1;
+ restrictinfo->left_mcvfreq = -1;
+ restrictinfo->right_mcvfreq = -1;
+
+ restrictinfo->left_hasheqoperator = InvalidOid;
+ restrictinfo->right_hasheqoperator = InvalidOid;
+
+ return restrictinfo;
+}
+
+/*
+ * Recursively insert sub-RestrictInfo nodes into a boolean expression.
+ *
+ * We put RestrictInfos above simple (non-AND/OR) clauses and above
+ * sub-OR clauses, but not above sub-AND clauses, because there's no need.
+ * This may seem odd but it is closely related to the fact that we use
+ * implicit-AND lists at top level of RestrictInfo lists. Only ORs and
+ * simple clauses are valid RestrictInfos.
+ *
+ * The same is_pushed_down, outerjoin_delayed, and pseudoconstant flag
+ * values can be applied to all RestrictInfo nodes in the result. Likewise
+ * for security_level, outer_relids, and nullable_relids.
+ *
+ * The given required_relids are attached to our top-level output,
+ * but any OR-clause constituents are allowed to default to just the
+ * contained rels.
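+ *
+ * A sketch of the resulting structure for an assumed input clause
+ * (a = 1 OR (b = 2 AND c = 3)): the result is a single RestrictInfo whose
+ * clause is the original expression and whose orclause is
+ * OR(RestrictInfo(a = 1), AND(RestrictInfo(b = 2), RestrictInfo(c = 3))).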
+ */
+static Expr *
+make_sub_restrictinfos(PlannerInfo *root,
+ Expr *clause,
+ bool is_pushed_down,
+ bool outerjoin_delayed,
+ bool pseudoconstant,
+ Index security_level,
+ Relids required_relids,
+ Relids outer_relids,
+ Relids nullable_relids)
+{
+ if (is_orclause(clause))
+ {
+ List *orlist = NIL;
+ ListCell *temp;
+
+ foreach(temp, ((BoolExpr *) clause)->args)
+ orlist = lappend(orlist,
+ make_sub_restrictinfos(root,
+ lfirst(temp),
+ is_pushed_down,
+ outerjoin_delayed,
+ pseudoconstant,
+ security_level,
+ NULL,
+ outer_relids,
+ nullable_relids));
+ return (Expr *) make_restrictinfo_internal(root,
+ clause,
+ make_orclause(orlist),
+ is_pushed_down,
+ outerjoin_delayed,
+ pseudoconstant,
+ security_level,
+ required_relids,
+ outer_relids,
+ nullable_relids);
+ }
+ else if (is_andclause(clause))
+ {
+ List *andlist = NIL;
+ ListCell *temp;
+
+ foreach(temp, ((BoolExpr *) clause)->args)
+ andlist = lappend(andlist,
+ make_sub_restrictinfos(root,
+ lfirst(temp),
+ is_pushed_down,
+ outerjoin_delayed,
+ pseudoconstant,
+ security_level,
+ required_relids,
+ outer_relids,
+ nullable_relids));
+ return make_andclause(andlist);
+ }
+ else
+ return (Expr *) make_restrictinfo_internal(root,
+ clause,
+ NULL,
+ is_pushed_down,
+ outerjoin_delayed,
+ pseudoconstant,
+ security_level,
+ required_relids,
+ outer_relids,
+ nullable_relids);
+}
+
+/*
+ * commute_restrictinfo
+ *
+ * Given a RestrictInfo containing a binary opclause, produce a RestrictInfo
+ * representing the commutation of that clause. The caller must pass the
+ * OID of the commutator operator (which it's presumably looked up, else
+ * it would not know this is valid).
+ *
+ * Beware that the result shares sub-structure with the given RestrictInfo.
+ * That's okay for the intended usage with derived index quals, but might
+ * be hazardous if the source is subject to change. Also notice that we
+ * assume without checking that the commutator op is a member of the same
+ * btree and hash opclasses as the original op.
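+ *
+ * For example (an assumed clause, not from any caller here): commuting a
+ * RestrictInfo for a.x < b.y with the OID of the ">" commutator yields a
+ * RestrictInfo for b.y > a.x, with the cached left/right fields swapped to
+ * match.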
+ */
+RestrictInfo *
+commute_restrictinfo(RestrictInfo *rinfo, Oid comm_op)
+{
+ RestrictInfo *result;
+ OpExpr *newclause;
+ OpExpr *clause = castNode(OpExpr, rinfo->clause);
+
+ Assert(list_length(clause->args) == 2);
+
+ /* flat-copy all the fields of clause ... */
+ newclause = makeNode(OpExpr);
+ memcpy(newclause, clause, sizeof(OpExpr));
+
+ /* ... and adjust those we need to change to commute it */
+ newclause->opno = comm_op;
+ newclause->opfuncid = InvalidOid;
+ newclause->args = list_make2(lsecond(clause->args),
+ linitial(clause->args));
+
+ /* likewise, flat-copy all the fields of rinfo ... */
+ result = makeNode(RestrictInfo);
+ memcpy(result, rinfo, sizeof(RestrictInfo));
+
+ /*
+ * ... and adjust those we need to change. Note in particular that we can
+ * preserve any cached selectivity or cost estimates, since those ought to
+ * be the same for the new clause. Likewise we can keep the source's
+ * parent_ec.
+ */
+ result->clause = (Expr *) newclause;
+ result->left_relids = rinfo->right_relids;
+ result->right_relids = rinfo->left_relids;
+ Assert(result->orclause == NULL);
+ result->left_ec = rinfo->right_ec;
+ result->right_ec = rinfo->left_ec;
+ result->left_em = rinfo->right_em;
+ result->right_em = rinfo->left_em;
+ result->scansel_cache = NIL; /* not worth updating this */
+ if (rinfo->hashjoinoperator == clause->opno)
+ result->hashjoinoperator = comm_op;
+ else
+ result->hashjoinoperator = InvalidOid;
+ result->left_bucketsize = rinfo->right_bucketsize;
+ result->right_bucketsize = rinfo->left_bucketsize;
+ result->left_mcvfreq = rinfo->right_mcvfreq;
+ result->right_mcvfreq = rinfo->left_mcvfreq;
+ result->left_hasheqoperator = InvalidOid;
+ result->right_hasheqoperator = InvalidOid;
+
+ return result;
+}
+
+/*
+ * restriction_is_or_clause
+ *
+ * Returns true iff the restrictinfo node contains an 'or' clause.
+ */
+bool
+restriction_is_or_clause(RestrictInfo *restrictinfo)
+{
+ if (restrictinfo->orclause != NULL)
+ return true;
+ else
+ return false;
+}
+
+/*
+ * restriction_is_securely_promotable
+ *
+ * Returns true if it's okay to evaluate this clause "early", that is before
+ * other restriction clauses attached to the specified relation.
+ */
+bool
+restriction_is_securely_promotable(RestrictInfo *restrictinfo,
+ RelOptInfo *rel)
+{
+ /*
+ * It's okay if there are no baserestrictinfo clauses for the rel that
+ * would need to go before this one, *or* if this one is leakproof.
+ */
+ if (restrictinfo->security_level <= rel->baserestrict_min_security ||
+ restrictinfo->leakproof)
+ return true;
+ else
+ return false;
+}
+
+/*
+ * get_actual_clauses
+ *
+ * Returns a list containing the bare clauses from 'restrictinfo_list'.
+ *
+ * This is only to be used in cases where none of the RestrictInfos can
+ * be pseudoconstant clauses (for instance, it's OK on indexqual lists).
+ */
+List *
+get_actual_clauses(List *restrictinfo_list)
+{
+ List *result = NIL;
+ ListCell *l;
+
+ foreach(l, restrictinfo_list)
+ {
+ RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
+
+ Assert(!rinfo->pseudoconstant);
+
+ result = lappend(result, rinfo->clause);
+ }
+ return result;
+}
+
+/*
+ * extract_actual_clauses
+ *
+ * Extract bare clauses from 'restrictinfo_list', returning either the
+ * regular ones or the pseudoconstant ones per 'pseudoconstant'.
+ */
+List *
+extract_actual_clauses(List *restrictinfo_list,
+ bool pseudoconstant)
+{
+ List *result = NIL;
+ ListCell *l;
+
+ foreach(l, restrictinfo_list)
+ {
+ RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
+
+ if (rinfo->pseudoconstant == pseudoconstant)
+ result = lappend(result, rinfo->clause);
+ }
+ return result;
+}
+
+/*
+ * extract_actual_join_clauses
+ *
+ * Extract bare clauses from 'restrictinfo_list', separating those that
+ * semantically match the join level from those that were pushed down.
+ * Pseudoconstant clauses are excluded from the results.
+ *
+ * This is only used at outer joins, since for plain joins we don't care
+ * about pushed-down-ness.
+ */
+void
+extract_actual_join_clauses(List *restrictinfo_list,
+ Relids joinrelids,
+ List **joinquals,
+ List **otherquals)
+{
+ ListCell *l;
+
+ *joinquals = NIL;
+ *otherquals = NIL;
+
+ foreach(l, restrictinfo_list)
+ {
+ RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
+
+ if (RINFO_IS_PUSHED_DOWN(rinfo, joinrelids))
+ {
+ if (!rinfo->pseudoconstant)
+ *otherquals = lappend(*otherquals, rinfo->clause);
+ }
+ else
+ {
+ /* joinquals shouldn't have been marked pseudoconstant */
+ Assert(!rinfo->pseudoconstant);
+ *joinquals = lappend(*joinquals, rinfo->clause);
+ }
+ }
+}
+
+/*
+ * has_pseudoconstant_clauses
+ *
+ * Returns true if 'restrictinfo_list' includes pseudoconstant clauses.
+ *
+ * This is used when we determine whether to allow extensions to consider
+ * pushing down joins in add_paths_to_joinrel().
+ */
+bool
+has_pseudoconstant_clauses(PlannerInfo *root,
+ List *restrictinfo_list)
+{
+ ListCell *l;
+
+ /* No need to look if we know there are no pseudoconstants */
+ if (!root->hasPseudoConstantQuals)
+ return false;
+
+ /* See if there are pseudoconstants in the RestrictInfo list */
+ foreach(l, restrictinfo_list)
+ {
+ RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
+
+ if (rinfo->pseudoconstant)
+ return true;
+ }
+ return false;
+}
+
+
+/*
+ * join_clause_is_movable_to
+ * Test whether a join clause is a safe candidate for parameterization
+ * of a scan on the specified base relation.
+ *
+ * A movable join clause is one that can safely be evaluated at a rel below
+ * its normal semantic level (ie, its required_relids), if the values of
+ * variables that it would need from other rels are provided.
+ *
+ * We insist that the clause actually reference the target relation; this
+ * prevents undesirable movement of degenerate join clauses, and ensures
+ * that there is a unique place that a clause can be moved down to.
+ *
+ * We cannot move an outer-join clause into the non-nullable side of its
+ * outer join, as that would change the results (rows would be suppressed
+ * rather than being null-extended).
+ *
+ * Also there must not be an outer join below the clause that would null the
+ * Vars coming from the target relation. Otherwise the clause might give
+ * results different from what it would give at its normal semantic level.
+ *
+ * Also, the join clause must not use any relations that have LATERAL
+ * references to the target relation, since we could not put such rels on
+ * the outer side of a nestloop with the target relation.
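+ *
+ * Hedged example (invented rels): the clause a.x = b.y, whose normal semantic
+ * level is the {a,b} join, is movable to a scan of b parameterized by a,
+ * provided b is not nullable below the clause, the clause is not an
+ * outer-join clause whose outer side includes b, and a has no LATERAL
+ * reference to b.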
+ */
+bool
+join_clause_is_movable_to(RestrictInfo *rinfo, RelOptInfo *baserel)
+{
+ /* Clause must physically reference target rel */
+ if (!bms_is_member(baserel->relid, rinfo->clause_relids))
+ return false;
+
+ /* Cannot move an outer-join clause into the join's outer side */
+ if (bms_is_member(baserel->relid, rinfo->outer_relids))
+ return false;
+
+ /* Target rel must not be nullable below the clause */
+ if (bms_is_member(baserel->relid, rinfo->nullable_relids))
+ return false;
+
+ /* Clause must not use any rels with LATERAL references to this rel */
+ if (bms_overlap(baserel->lateral_referencers, rinfo->clause_relids))
+ return false;
+
+ return true;
+}
+
+/*
+ * join_clause_is_movable_into
+ * Test whether a join clause is movable and can be evaluated within
+ * the current join context.
+ *
+ * currentrelids: the relids of the proposed evaluation location
+ * current_and_outer: the union of currentrelids and the required_outer
+ * relids (parameterization's outer relations)
+ *
+ * The API would be a bit clearer if we passed the current relids and the
+ * outer relids separately and did bms_union internally; but since most
+ * callers need to apply this function to multiple clauses, we make the
+ * caller perform the union.
+ *
+ * Obviously, the clause must only refer to Vars available from the current
+ * relation plus the outer rels. We also check that it does reference at
+ * least one current Var, ensuring that the clause will be pushed down to
+ * a unique place in a parameterized join tree. And we check that we're
+ * not pushing the clause into its outer-join outer side, nor down into
+ * a lower outer join's inner side.
+ *
+ * The check about pushing a clause down into a lower outer join's inner side
+ * is only approximate; it sometimes returns "false" when actually it would
+ * be safe to use the clause here because we're still above the outer join
+ * in question. This is okay as long as the answers at different join levels
+ * are consistent: it just means we might sometimes fail to push a clause as
+ * far down as it could safely be pushed. It's unclear whether it would be
+ * worthwhile to do this more precisely. (But if it's ever fixed to be
+ * exactly accurate, there's an Assert in get_joinrel_parampathinfo() that
+ * should be re-enabled.)
+ *
+ * There's no check here equivalent to join_clause_is_movable_to's test on
+ * lateral_referencers. We assume the caller wouldn't be inquiring unless
+ * it'd verified that the proposed outer rels don't have lateral references
+ * to the current rel(s). (If we are considering join paths with the outer
+ * rels on the outside and the current rels on the inside, then this should
+ * have been checked at the outset of such consideration; see join_is_legal
+ * and the path parameterization checks in joinpath.c.) On the other hand,
+ * in join_clause_is_movable_to we are asking whether the clause could be
+ * moved for some valid set of outer rels, so we don't have the benefit of
+ * relying on prior checks for lateral-reference validity.
+ *
+ * Note: if this returns true, it means that the clause could be moved to
+ * this join relation, but that doesn't mean that this is the lowest join
+ * it could be moved to. Caller may need to make additional calls to verify
+ * that this doesn't succeed on either of the inputs of a proposed join.
+ *
+ * Note: get_joinrel_parampathinfo depends on the fact that if
+ * current_and_outer is NULL, this function will always return false
+ * (since one or the other of the first two tests must fail).
+ */
+bool
+join_clause_is_movable_into(RestrictInfo *rinfo,
+ Relids currentrelids,
+ Relids current_and_outer)
+{
+ /* Clause must be evaluable given available context */
+ if (!bms_is_subset(rinfo->clause_relids, current_and_outer))
+ return false;
+
+ /* Clause must physically reference at least one target rel */
+ if (!bms_overlap(currentrelids, rinfo->clause_relids))
+ return false;
+
+ /* Cannot move an outer-join clause into the join's outer side */
+ if (bms_overlap(currentrelids, rinfo->outer_relids))
+ return false;
+
+ /*
+ * Target rel(s) must not be nullable below the clause. This is
+ * approximate, in the safe direction, because the current join might be
+ * above the join where the nulling would happen, in which case the clause
+ * would work correctly here. But we don't have enough info to be sure.
+ */
+ if (bms_overlap(currentrelids, rinfo->nullable_relids))
+ return false;
+
+ return true;
+}
diff --git a/src/backend/optimizer/util/tlist.c b/src/backend/optimizer/util/tlist.c
new file mode 100644
index 0000000..eed3e3f
--- /dev/null
+++ b/src/backend/optimizer/util/tlist.c
@@ -0,0 +1,1258 @@
+/*-------------------------------------------------------------------------
+ *
+ * tlist.c
+ * Target list manipulation routines
+ *
+ * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ * src/backend/optimizer/util/tlist.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "nodes/makefuncs.h"
+#include "nodes/nodeFuncs.h"
+#include "optimizer/cost.h"
+#include "optimizer/optimizer.h"
+#include "optimizer/tlist.h"
+
+
+/*
+ * Test if an expression node represents a SRF call. Beware multiple eval!
+ *
+ * Please note that this is only meant for use in split_pathtarget_at_srfs();
+ * if you use it anywhere else, your code is almost certainly wrong for SRFs
+ * nested within expressions. Use expression_returns_set() instead.
+ */
+#define IS_SRF_CALL(node) \
+ ((IsA(node, FuncExpr) && ((FuncExpr *) (node))->funcretset) || \
+ (IsA(node, OpExpr) && ((OpExpr *) (node))->opretset))
+
+/*
+ * Data structures for split_pathtarget_at_srfs(). To preserve the identity
+ * of sortgroupref items even if they are textually equal(), what we track is
+ * not just bare expressions but expressions plus their sortgroupref indexes.
+ */
+typedef struct
+{
+ Node *expr; /* some subexpression of a PathTarget */
+ Index sortgroupref; /* its sortgroupref, or 0 if none */
+} split_pathtarget_item;
+
+typedef struct
+{
+ /* This is a List of bare expressions: */
+ List *input_target_exprs; /* exprs available from input */
+ /* These are Lists of Lists of split_pathtarget_items: */
+ List *level_srfs; /* SRF exprs to evaluate at each level */
+ List *level_input_vars; /* input vars needed at each level */
+ List *level_input_srfs; /* input SRFs needed at each level */
+ /* These are Lists of split_pathtarget_items: */
+ List *current_input_vars; /* vars needed in current subexpr */
+ List *current_input_srfs; /* SRFs needed in current subexpr */
+ /* Auxiliary data for current split_pathtarget_walker traversal: */
+ int current_depth; /* max SRF depth in current subexpr */
+ Index current_sgref; /* current subexpr's sortgroupref, or 0 */
+} split_pathtarget_context;
+
+static bool split_pathtarget_walker(Node *node,
+ split_pathtarget_context *context);
+static void add_sp_item_to_pathtarget(PathTarget *target,
+ split_pathtarget_item *item);
+static void add_sp_items_to_pathtarget(PathTarget *target, List *items);
+
+
+/*****************************************************************************
+ * Target list creation and searching utilities
+ *****************************************************************************/
+
+/*
+ * tlist_member
+ * Finds the (first) member of the given tlist whose expression is
+ * equal() to the given expression. Result is NULL if no such member.
+ */
+TargetEntry *
+tlist_member(Expr *node, List *targetlist)
+{
+ ListCell *temp;
+
+ foreach(temp, targetlist)
+ {
+ TargetEntry *tlentry = (TargetEntry *) lfirst(temp);
+
+ if (equal(node, tlentry->expr))
+ return tlentry;
+ }
+ return NULL;
+}
+
+/*
+ * tlist_member_match_var
+ * Same as above, except that we match the provided Var on the basis
+ * of varno/varattno/varlevelsup/vartype only, rather than full equal().
+ *
+ * This is needed in some cases where we can't be sure of an exact typmod
+ * match. For safety, though, we insist on vartype match.
+ */
+static TargetEntry *
+tlist_member_match_var(Var *var, List *targetlist)
+{
+ ListCell *temp;
+
+ foreach(temp, targetlist)
+ {
+ TargetEntry *tlentry = (TargetEntry *) lfirst(temp);
+ Var *tlvar = (Var *) tlentry->expr;
+
+ if (!tlvar || !IsA(tlvar, Var))
+ continue;
+ if (var->varno == tlvar->varno &&
+ var->varattno == tlvar->varattno &&
+ var->varlevelsup == tlvar->varlevelsup &&
+ var->vartype == tlvar->vartype)
+ return tlentry;
+ }
+ return NULL;
+}
+
+/*
+ * add_to_flat_tlist
+ * Add more items to a flattened tlist (if they're not already in it)
+ *
+ * 'tlist' is the flattened tlist
+ * 'exprs' is a list of expressions (usually, but not necessarily, Vars)
+ *
+ * Returns the extended tlist.
+ */
+List *
+add_to_flat_tlist(List *tlist, List *exprs)
+{
+ int next_resno = list_length(tlist) + 1;
+ ListCell *lc;
+
+ foreach(lc, exprs)
+ {
+ Expr *expr = (Expr *) lfirst(lc);
+
+ if (!tlist_member(expr, tlist))
+ {
+ TargetEntry *tle;
+
+ tle = makeTargetEntry(copyObject(expr), /* copy needed?? */
+ next_resno++,
+ NULL,
+ false);
+ tlist = lappend(tlist, tle);
+ }
+ }
+ return tlist;
+}
+
+
+/*
+ * get_tlist_exprs
+ * Get just the expression subtrees of a tlist
+ *
+ * Resjunk columns are ignored unless includeJunk is true
+ */
+List *
+get_tlist_exprs(List *tlist, bool includeJunk)
+{
+ List *result = NIL;
+ ListCell *l;
+
+ foreach(l, tlist)
+ {
+ TargetEntry *tle = (TargetEntry *) lfirst(l);
+
+ if (tle->resjunk && !includeJunk)
+ continue;
+
+ result = lappend(result, tle->expr);
+ }
+ return result;
+}
+
+
+/*
+ * count_nonjunk_tlist_entries
+ * Count the number of non-resjunk entries in the given targetlist.
+ */
+int
+count_nonjunk_tlist_entries(List *tlist)
+{
+ int len = 0;
+ ListCell *l;
+
+ foreach(l, tlist)
+ {
+ TargetEntry *tle = (TargetEntry *) lfirst(l);
+
+ if (!tle->resjunk)
+ len++;
+ }
+ return len;
+}
+
+
+/*
+ * tlist_same_exprs
+ * Check whether two target lists contain the same expressions
+ *
+ * Note: this function is used to decide whether it's safe to jam a new tlist
+ * into a non-projection-capable plan node. Obviously we can't do that unless
+ * the node's tlist shows it already returns the column values we want.
+ * However, we can ignore the TargetEntry attributes resname, ressortgroupref,
+ * resorigtbl, resorigcol, and resjunk, because those are only labelings that
+ * don't affect the row values computed by the node. (Moreover, if we didn't
+ * ignore them, we'd frequently fail to make the desired optimization, since
+ * the planner tends to not bother to make resname etc. valid in intermediate
+ * plan nodes.) Note that on success, the caller must still jam the desired
+ * tlist into the plan node, else it won't have the desired labeling fields.
+ */
+bool
+tlist_same_exprs(List *tlist1, List *tlist2)
+{
+ ListCell *lc1,
+ *lc2;
+
+ if (list_length(tlist1) != list_length(tlist2))
+ return false; /* not same length, so can't match */
+
+ forboth(lc1, tlist1, lc2, tlist2)
+ {
+ TargetEntry *tle1 = (TargetEntry *) lfirst(lc1);
+ TargetEntry *tle2 = (TargetEntry *) lfirst(lc2);
+
+ if (!equal(tle1->expr, tle2->expr))
+ return false;
+ }
+
+ return true;
+}
+
+
+/*
+ * Does tlist have same output datatypes as listed in colTypes?
+ *
+ * Resjunk columns are ignored if junkOK is true; otherwise presence of
+ * a resjunk column will always cause a 'false' result.
+ *
+ * Note: currently no callers care about comparing typmods.
+ */
+bool
+tlist_same_datatypes(List *tlist, List *colTypes, bool junkOK)
+{
+ ListCell *l;
+ ListCell *curColType = list_head(colTypes);
+
+ foreach(l, tlist)
+ {
+ TargetEntry *tle = (TargetEntry *) lfirst(l);
+
+ if (tle->resjunk)
+ {
+ if (!junkOK)
+ return false;
+ }
+ else
+ {
+ if (curColType == NULL)
+ return false; /* tlist longer than colTypes */
+ if (exprType((Node *) tle->expr) != lfirst_oid(curColType))
+ return false;
+ curColType = lnext(colTypes, curColType);
+ }
+ }
+ if (curColType != NULL)
+ return false; /* tlist shorter than colTypes */
+ return true;
+}
+
+/*
+ * Does tlist have same exposed collations as listed in colCollations?
+ *
+ * Identical logic to the above, but for collations.
+ */
+bool
+tlist_same_collations(List *tlist, List *colCollations, bool junkOK)
+{
+ ListCell *l;
+ ListCell *curColColl = list_head(colCollations);
+
+ foreach(l, tlist)
+ {
+ TargetEntry *tle = (TargetEntry *) lfirst(l);
+
+ if (tle->resjunk)
+ {
+ if (!junkOK)
+ return false;
+ }
+ else
+ {
+ if (curColColl == NULL)
+ return false; /* tlist longer than colCollations */
+ if (exprCollation((Node *) tle->expr) != lfirst_oid(curColColl))
+ return false;
+ curColColl = lnext(colCollations, curColColl);
+ }
+ }
+ if (curColColl != NULL)
+ return false; /* tlist shorter than colCollations */
+ return true;
+}
+
+/*
+ * apply_tlist_labeling
+ * Apply the TargetEntry labeling attributes of src_tlist to dest_tlist
+ *
+ * This is useful for reattaching column names etc to a plan's final output
+ * targetlist.
+ */
+void
+apply_tlist_labeling(List *dest_tlist, List *src_tlist)
+{
+ ListCell *ld,
+ *ls;
+
+ Assert(list_length(dest_tlist) == list_length(src_tlist));
+ forboth(ld, dest_tlist, ls, src_tlist)
+ {
+ TargetEntry *dest_tle = (TargetEntry *) lfirst(ld);
+ TargetEntry *src_tle = (TargetEntry *) lfirst(ls);
+
+ Assert(dest_tle->resno == src_tle->resno);
+ dest_tle->resname = src_tle->resname;
+ dest_tle->ressortgroupref = src_tle->ressortgroupref;
+ dest_tle->resorigtbl = src_tle->resorigtbl;
+ dest_tle->resorigcol = src_tle->resorigcol;
+ dest_tle->resjunk = src_tle->resjunk;
+ }
+}
+
+
+/*
+ * get_sortgroupref_tle
+ * Find the targetlist entry matching the given SortGroupRef index,
+ * and return it.
+ */
+TargetEntry *
+get_sortgroupref_tle(Index sortref, List *targetList)
+{
+ ListCell *l;
+
+ foreach(l, targetList)
+ {
+ TargetEntry *tle = (TargetEntry *) lfirst(l);
+
+ if (tle->ressortgroupref == sortref)
+ return tle;
+ }
+
+ elog(ERROR, "ORDER/GROUP BY expression not found in targetlist");
+ return NULL; /* keep compiler quiet */
+}
+
+/*
+ * get_sortgroupclause_tle
+ * Find the targetlist entry matching the given SortGroupClause
+ * by ressortgroupref, and return it.
+ */
+TargetEntry *
+get_sortgroupclause_tle(SortGroupClause *sgClause,
+ List *targetList)
+{
+ return get_sortgroupref_tle(sgClause->tleSortGroupRef, targetList);
+}
+
+/*
+ * get_sortgroupclause_expr
+ * Find the targetlist entry matching the given SortGroupClause
+ * by ressortgroupref, and return its expression.
+ */
+Node *
+get_sortgroupclause_expr(SortGroupClause *sgClause, List *targetList)
+{
+ TargetEntry *tle = get_sortgroupclause_tle(sgClause, targetList);
+
+ return (Node *) tle->expr;
+}
+
+/*
+ * get_sortgrouplist_exprs
+ * Given a list of SortGroupClauses, build a list
+ * of the referenced targetlist expressions.
+ */
+List *
+get_sortgrouplist_exprs(List *sgClauses, List *targetList)
+{
+ List *result = NIL;
+ ListCell *l;
+
+ foreach(l, sgClauses)
+ {
+ SortGroupClause *sortcl = (SortGroupClause *) lfirst(l);
+ Node *sortexpr;
+
+ sortexpr = get_sortgroupclause_expr(sortcl, targetList);
+ result = lappend(result, sortexpr);
+ }
+ return result;
+}
+
+
+/*****************************************************************************
+ * Functions to extract data from a list of SortGroupClauses
+ *
+ * These don't really belong in tlist.c, but they are sort of related to the
+ * functions just above, and they don't seem to deserve their own file.
+ *****************************************************************************/
+
+/*
+ * get_sortgroupref_clause
+ * Find the SortGroupClause matching the given SortGroupRef index,
+ * and return it.
+ */
+SortGroupClause *
+get_sortgroupref_clause(Index sortref, List *clauses)
+{
+ ListCell *l;
+
+ foreach(l, clauses)
+ {
+ SortGroupClause *cl = (SortGroupClause *) lfirst(l);
+
+ if (cl->tleSortGroupRef == sortref)
+ return cl;
+ }
+
+ elog(ERROR, "ORDER/GROUP BY expression not found in list");
+ return NULL; /* keep compiler quiet */
+}
+
+/*
+ * get_sortgroupref_clause_noerr
+ * As above, but return NULL rather than throwing an error if not found.
+ */
+SortGroupClause *
+get_sortgroupref_clause_noerr(Index sortref, List *clauses)
+{
+ ListCell *l;
+
+ foreach(l, clauses)
+ {
+ SortGroupClause *cl = (SortGroupClause *) lfirst(l);
+
+ if (cl->tleSortGroupRef == sortref)
+ return cl;
+ }
+
+ return NULL;
+}
+
+/*
+ * extract_grouping_ops - make an array of the equality operator OIDs
+ * for a SortGroupClause list
+ */
+Oid *
+extract_grouping_ops(List *groupClause)
+{
+ int numCols = list_length(groupClause);
+ int colno = 0;
+ Oid *groupOperators;
+ ListCell *glitem;
+
+ groupOperators = (Oid *) palloc(sizeof(Oid) * numCols);
+
+ foreach(glitem, groupClause)
+ {
+ SortGroupClause *groupcl = (SortGroupClause *) lfirst(glitem);
+
+ groupOperators[colno] = groupcl->eqop;
+ Assert(OidIsValid(groupOperators[colno]));
+ colno++;
+ }
+
+ return groupOperators;
+}
+
+/*
+ * extract_grouping_collations - make an array of the grouping column collations
+ * for a SortGroupClause list
+ */
+Oid *
+extract_grouping_collations(List *groupClause, List *tlist)
+{
+ int numCols = list_length(groupClause);
+ int colno = 0;
+ Oid *grpCollations;
+ ListCell *glitem;
+
+ grpCollations = (Oid *) palloc(sizeof(Oid) * numCols);
+
+ foreach(glitem, groupClause)
+ {
+ SortGroupClause *groupcl = (SortGroupClause *) lfirst(glitem);
+ TargetEntry *tle = get_sortgroupclause_tle(groupcl, tlist);
+
+ grpCollations[colno++] = exprCollation((Node *) tle->expr);
+ }
+
+ return grpCollations;
+}
+
+/*
+ * extract_grouping_cols - make an array of the grouping column resnos
+ * for a SortGroupClause list
+ */
+AttrNumber *
+extract_grouping_cols(List *groupClause, List *tlist)
+{
+ AttrNumber *grpColIdx;
+ int numCols = list_length(groupClause);
+ int colno = 0;
+ ListCell *glitem;
+
+ grpColIdx = (AttrNumber *) palloc(sizeof(AttrNumber) * numCols);
+
+ foreach(glitem, groupClause)
+ {
+ SortGroupClause *groupcl = (SortGroupClause *) lfirst(glitem);
+ TargetEntry *tle = get_sortgroupclause_tle(groupcl, tlist);
+
+ grpColIdx[colno++] = tle->resno;
+ }
+
+ return grpColIdx;
+}
+
+/*
+ * grouping_is_sortable - is it possible to implement grouping list by sorting?
+ *
+ * This is easy since the parser will have included a sortop if one exists.
+ */
+bool
+grouping_is_sortable(List *groupClause)
+{
+ ListCell *glitem;
+
+ foreach(glitem, groupClause)
+ {
+ SortGroupClause *groupcl = (SortGroupClause *) lfirst(glitem);
+
+ if (!OidIsValid(groupcl->sortop))
+ return false;
+ }
+ return true;
+}
+
+/*
+ * grouping_is_hashable - is it possible to implement grouping list by hashing?
+ *
+ * We rely on the parser to have set the hashable flag correctly.
+ */
+bool
+grouping_is_hashable(List *groupClause)
+{
+ ListCell *glitem;
+
+ foreach(glitem, groupClause)
+ {
+ SortGroupClause *groupcl = (SortGroupClause *) lfirst(glitem);
+
+ if (!groupcl->hashable)
+ return false;
+ }
+ return true;
+}
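+
+/*
+ * Usage sketch (hypothetical helper; the name and the DEBUG output are
+ * assumptions for illustration): a caller building a grouped plan would
+ * typically check sortability/hashability of the GROUP BY list and then
+ * extract the parallel per-column arrays with the functions above.
+ */
+#ifdef NOT_USED
+static void
+example_describe_grouping(List *groupClause, List *tlist)
+{
+	int			numCols = list_length(groupClause);
+	AttrNumber *grpColIdx = extract_grouping_cols(groupClause, tlist);
+	Oid		   *grpOperators = extract_grouping_ops(groupClause);
+	Oid		   *grpCollations = extract_grouping_collations(groupClause, tlist);
+
+	elog(DEBUG1, "grouping on %d columns: sortable=%d hashable=%d",
+		 numCols,
+		 grouping_is_sortable(groupClause),
+		 grouping_is_hashable(groupClause));
+
+	/* grpColIdx[i], grpOperators[i] and grpCollations[i] describe column i */
+	for (int i = 0; i < numCols; i++)
+		elog(DEBUG1, "col %d: resno %d eqop %u collation %u",
+			 i, grpColIdx[i], grpOperators[i], grpCollations[i]);
+}
+#endif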
+
+
+/*****************************************************************************
+ * PathTarget manipulation functions
+ *
+ * PathTarget is a somewhat stripped-down version of a full targetlist; it
+ * omits all the TargetEntry decoration except (optionally) sortgroupref data,
+ * and it adds evaluation cost and output data width info.
+ *****************************************************************************/
+
+/*
+ * make_pathtarget_from_tlist
+ * Construct a PathTarget equivalent to the given targetlist.
+ *
+ * This leaves the cost and width fields as zeroes. Most callers will want
+ * to use create_pathtarget(), so as to get those set.
+ */
+PathTarget *
+make_pathtarget_from_tlist(List *tlist)
+{
+ PathTarget *target = makeNode(PathTarget);
+ int i;
+ ListCell *lc;
+
+ target->sortgrouprefs = (Index *) palloc(list_length(tlist) * sizeof(Index));
+
+ i = 0;
+ foreach(lc, tlist)
+ {
+ TargetEntry *tle = (TargetEntry *) lfirst(lc);
+
+ target->exprs = lappend(target->exprs, tle->expr);
+ target->sortgrouprefs[i] = tle->ressortgroupref;
+ i++;
+ }
+
+ /*
+ * Mark volatility as unknown. The contain_volatile_functions function
+ * will determine if there are any volatile functions when called for the
+ * first time with this PathTarget.
+ */
+ target->has_volatile_expr = VOLATILITY_UNKNOWN;
+
+ return target;
+}
+
+/*
+ * make_tlist_from_pathtarget
+ * Construct a targetlist from a PathTarget.
+ */
+List *
+make_tlist_from_pathtarget(PathTarget *target)
+{
+ List *tlist = NIL;
+ int i;
+ ListCell *lc;
+
+ i = 0;
+ foreach(lc, target->exprs)
+ {
+ Expr *expr = (Expr *) lfirst(lc);
+ TargetEntry *tle;
+
+ tle = makeTargetEntry(expr,
+ i + 1,
+ NULL,
+ false);
+ if (target->sortgrouprefs)
+ tle->ressortgroupref = target->sortgrouprefs[i];
+ tlist = lappend(tlist, tle);
+ i++;
+ }
+
+ return tlist;
+}
+
+/*
+ * copy_pathtarget
+ * Copy a PathTarget.
+ *
+ * The new PathTarget has its own exprs List, but shares the underlying
+ * target expression trees with the old one.
+ */
+PathTarget *
+copy_pathtarget(PathTarget *src)
+{
+ PathTarget *dst = makeNode(PathTarget);
+
+ /* Copy scalar fields */
+ memcpy(dst, src, sizeof(PathTarget));
+ /* Shallow-copy the expression list */
+ dst->exprs = list_copy(src->exprs);
+ /* Duplicate sortgrouprefs if any (if not, the memcpy handled this) */
+ if (src->sortgrouprefs)
+ {
+ Size nbytes = list_length(src->exprs) * sizeof(Index);
+
+ dst->sortgrouprefs = (Index *) palloc(nbytes);
+ memcpy(dst->sortgrouprefs, src->sortgrouprefs, nbytes);
+ }
+ return dst;
+}
+
+/*
+ * create_empty_pathtarget
+ * Create an empty (zero columns, zero cost) PathTarget.
+ */
+PathTarget *
+create_empty_pathtarget(void)
+{
+ /* This is easy, but we don't want callers to hard-wire this ... */
+ return makeNode(PathTarget);
+}
+
+/*
+ * add_column_to_pathtarget
+ * Append a target column to the PathTarget.
+ *
+ * As with make_pathtarget_from_tlist, we leave it to the caller to update
+ * the cost and width fields.
+ */
+void
+add_column_to_pathtarget(PathTarget *target, Expr *expr, Index sortgroupref)
+{
+ /* Updating the exprs list is easy ... */
+ target->exprs = lappend(target->exprs, expr);
+ /* ... the sortgroupref data, a bit less so */
+ if (target->sortgrouprefs)
+ {
+ int nexprs = list_length(target->exprs);
+
+ /* This might look inefficient, but actually it's usually cheap */
+ target->sortgrouprefs = (Index *)
+ repalloc(target->sortgrouprefs, nexprs * sizeof(Index));
+ target->sortgrouprefs[nexprs - 1] = sortgroupref;
+ }
+ else if (sortgroupref)
+ {
+ /* Adding sortgroupref labeling to a previously unlabeled target */
+ int nexprs = list_length(target->exprs);
+
+ target->sortgrouprefs = (Index *) palloc0(nexprs * sizeof(Index));
+ target->sortgrouprefs[nexprs - 1] = sortgroupref;
+ }
+
+ /*
+ * Reset has_volatile_expr to UNKNOWN. We just leave it up to
+ * contain_volatile_functions to set this properly again. Technically we
+ * could save some effort here and just check the new Expr, but it seems
+ * better to keep the logic for setting this flag in one location rather
+ * than duplicating the logic here.
+ */
+ if (target->has_volatile_expr == VOLATILITY_NOVOLATILE)
+ target->has_volatile_expr = VOLATILITY_UNKNOWN;
+}
+
+/*
+ * add_new_column_to_pathtarget
+ * Append a target column to the PathTarget, but only if it's not
+ * equal() to any pre-existing target expression.
+ *
+ * The caller cannot specify a sortgroupref, since it would be unclear how
+ * to merge that with a pre-existing column.
+ *
+ * As with make_pathtarget_from_tlist, we leave it to the caller to update
+ * the cost and width fields.
+ */
+void
+add_new_column_to_pathtarget(PathTarget *target, Expr *expr)
+{
+ if (!list_member(target->exprs, expr))
+ add_column_to_pathtarget(target, expr, 0);
+}
+
+/*
+ * add_new_columns_to_pathtarget
+ * Apply add_new_column_to_pathtarget() for each element of the list.
+ */
+void
+add_new_columns_to_pathtarget(PathTarget *target, List *exprs)
+{
+ ListCell *lc;
+
+ foreach(lc, exprs)
+ {
+ Expr *expr = (Expr *) lfirst(lc);
+
+ add_new_column_to_pathtarget(target, expr);
+ }
+}
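+
+/*
+ * Usage sketch (the function name and the int4 Var are assumptions made
+ * purely for illustration): constructing a PathTarget directly, rather
+ * than via make_pathtarget_from_tlist(), typically looks like this.  As
+ * the header comments above note, the caller remains responsible for
+ * setting cost and width, here via set_pathtarget_cost_width().
+ */
+#ifdef NOT_USED
+static PathTarget *
+example_build_pathtarget(PlannerInfo *root, Index relid, List *extra_exprs)
+{
+	PathTarget *target = create_empty_pathtarget();
+	Var		   *var;
+
+	/* first attribute of the given rel, assumed here to be of type int4 */
+	var = makeVar(relid, 1, INT4OID, -1, InvalidOid, 0);
+
+	/* label the column with sortgroupref 1, as if it were a GROUP BY item */
+	add_column_to_pathtarget(target, (Expr *) var, 1);
+
+	/* fold in any other expressions, skipping ones already present */
+	add_new_columns_to_pathtarget(target, extra_exprs);
+
+	set_pathtarget_cost_width(root, target);
+	return target;
+}
+#endif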
+
+/*
+ * apply_pathtarget_labeling_to_tlist
+ * Apply any sortgrouprefs in the PathTarget to matching tlist entries
+ *
+ * Here, we do not assume that the tlist entries are one-for-one with the
+ * PathTarget. The intended use of this function is to deal with cases
+ * where createplan.c has decided to use some other tlist and we have
+ * to identify what matches exist.
+ */
+void
+apply_pathtarget_labeling_to_tlist(List *tlist, PathTarget *target)
+{
+ int i;
+ ListCell *lc;
+
+ /* Nothing to do if PathTarget has no sortgrouprefs data */
+ if (target->sortgrouprefs == NULL)
+ return;
+
+ i = 0;
+ foreach(lc, target->exprs)
+ {
+ Expr *expr = (Expr *) lfirst(lc);
+ TargetEntry *tle;
+
+ if (target->sortgrouprefs[i])
+ {
+ /*
+ * For Vars, use tlist_member_match_var's weakened matching rule;
+ * this allows us to deal with some cases where a set-returning
+ * function has been inlined, so that we now have more knowledge
+ * about what it returns than we did when the original Var was
+ * created. Otherwise, use regular equal() to find the matching
+ * TLE. (In current usage, only the Var case is actually needed;
+ * but it seems best to have sane behavior here for non-Vars too.)
+ */
+ if (expr && IsA(expr, Var))
+ tle = tlist_member_match_var((Var *) expr, tlist);
+ else
+ tle = tlist_member(expr, tlist);
+
+ /*
+			 * Complain if no place for the sortgrouprefs label, or if we'd
+ * have to label a column twice. (The case where it already has
+ * the desired label probably can't happen, but we may as well
+ * allow for it.)
+ */
+ if (!tle)
+ elog(ERROR, "ORDER/GROUP BY expression not found in targetlist");
+ if (tle->ressortgroupref != 0 &&
+ tle->ressortgroupref != target->sortgrouprefs[i])
+ elog(ERROR, "targetlist item has multiple sortgroupref labels");
+
+ tle->ressortgroupref = target->sortgrouprefs[i];
+ }
+ i++;
+ }
+}
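+
+/*
+ * Usage sketch (illustration only; the wrapper name is assumed): the
+ * typical createplan.c pattern is to build a plan tlist by some other
+ * means and then stamp the path's sortgroupref labels onto it so that
+ * later Sort/Group nodes can locate their columns.
+ */
+#ifdef NOT_USED
+static List *
+example_label_plan_tlist(PathTarget *pathtarget, List *plan_tlist)
+{
+	/* errors out if a labeled expression is missing from plan_tlist */
+	apply_pathtarget_labeling_to_tlist(plan_tlist, pathtarget);
+	return plan_tlist;
+}
+#endif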
+
+/*
+ * split_pathtarget_at_srfs
+ * Split given PathTarget into multiple levels to position SRFs safely
+ *
+ * The executor can only handle set-returning functions that appear at the
+ * top level of the targetlist of a ProjectSet plan node. If we have any SRFs
+ * that are not at top level, we need to split up the evaluation into multiple
+ * plan levels in which each level satisfies this constraint. This function
+ * creates appropriate PathTarget(s) for each level.
+ *
+ * As an example, consider the tlist expression
+ * x + srf1(srf2(y + z))
+ * This expression should appear as-is in the top PathTarget, but below that
+ * we must have a PathTarget containing
+ * x, srf1(srf2(y + z))
+ * and below that, another PathTarget containing
+ * x, srf2(y + z)
+ * and below that, another PathTarget containing
+ * x, y, z
+ * When these tlists are processed by setrefs.c, subexpressions that match
+ * output expressions of the next lower tlist will be replaced by Vars,
+ * so that what the executor gets are tlists looking like
+ * Var1 + Var2
+ * Var1, srf1(Var2)
+ * Var1, srf2(Var2 + Var3)
+ * x, y, z
+ * which satisfy the desired property.
+ *
+ * Another example is
+ * srf1(x), srf2(srf3(y))
+ * That must appear as-is in the top PathTarget, but below that we need
+ * srf1(x), srf3(y)
+ * That is, each SRF must be computed at a level corresponding to the nesting
+ * depth of SRFs within its arguments.
+ *
+ * In some cases, a SRF has already been evaluated in some previous plan level
+ * and we shouldn't expand it again (that is, what we see in the target is
+ * already meant as a reference to a lower subexpression). So, don't expand
+ * any tlist expressions that appear in input_target, if that's not NULL.
+ *
+ * It's also important that we preserve any sortgroupref annotation appearing
+ * in the given target, especially on expressions matching input_target items.
+ *
+ * The outputs of this function are two parallel lists, one a list of
+ * PathTargets and the other an integer list of bool flags indicating
+ * whether the corresponding PathTarget contains any evaluable SRFs.
+ * The lists are given in the order they'd need to be evaluated in, with
+ * the "lowest" PathTarget first. So the last list entry is always the
+ * originally given PathTarget, and any entries before it indicate evaluation
+ * levels that must be inserted below it. The first list entry must not
+ * contain any SRFs (other than ones duplicating input_target entries), since
+ * it will typically be attached to a plan node that cannot evaluate SRFs.
+ *
+ * Note: using a list for the flags may seem like overkill, since there
+ * are only a few possible patterns for which levels contain SRFs.
+ * But this representation decouples callers from that knowledge.
+ */
+void
+split_pathtarget_at_srfs(PlannerInfo *root,
+ PathTarget *target, PathTarget *input_target,
+ List **targets, List **targets_contain_srfs)
+{
+ split_pathtarget_context context;
+ int max_depth;
+ bool need_extra_projection;
+ List *prev_level_tlist;
+ int lci;
+ ListCell *lc,
+ *lc1,
+ *lc2,
+ *lc3;
+
+ /*
+ * It's not unusual for planner.c to pass us two physically identical
+ * targets, in which case we can conclude without further ado that all
+ * expressions are available from the input. (The logic below would
+ * arrive at the same conclusion, but much more tediously.)
+ */
+ if (target == input_target)
+ {
+ *targets = list_make1(target);
+ *targets_contain_srfs = list_make1_int(false);
+ return;
+ }
+
+ /* Pass any input_target exprs down to split_pathtarget_walker() */
+ context.input_target_exprs = input_target ? input_target->exprs : NIL;
+
+ /*
+ * Initialize with empty level-zero lists, and no levels after that.
+ * (Note: we could dispense with representing level zero explicitly, since
+ * it will never receive any SRFs, but then we'd have to special-case that
+ * level when we get to building result PathTargets. Level zero describes
+ * the SRF-free PathTarget that will be given to the input plan node.)
+ */
+ context.level_srfs = list_make1(NIL);
+ context.level_input_vars = list_make1(NIL);
+ context.level_input_srfs = list_make1(NIL);
+
+ /* Initialize data we'll accumulate across all the target expressions */
+ context.current_input_vars = NIL;
+ context.current_input_srfs = NIL;
+ max_depth = 0;
+ need_extra_projection = false;
+
+ /* Scan each expression in the PathTarget looking for SRFs */
+ lci = 0;
+ foreach(lc, target->exprs)
+ {
+ Node *node = (Node *) lfirst(lc);
+
+ /* Tell split_pathtarget_walker about this expr's sortgroupref */
+ context.current_sgref = get_pathtarget_sortgroupref(target, lci);
+ lci++;
+
+ /*
+ * Find all SRFs and Vars (and Var-like nodes) in this expression, and
+ * enter them into appropriate lists within the context struct.
+ */
+ context.current_depth = 0;
+ split_pathtarget_walker(node, &context);
+
+ /* An expression containing no SRFs is of no further interest */
+ if (context.current_depth == 0)
+ continue;
+
+ /*
+ * Track max SRF nesting depth over the whole PathTarget. Also, if
+ * this expression establishes a new max depth, we no longer care
+ * whether previous expressions contained nested SRFs; we can handle
+ * any required projection for them in the final ProjectSet node.
+ */
+ if (max_depth < context.current_depth)
+ {
+ max_depth = context.current_depth;
+ need_extra_projection = false;
+ }
+
+ /*
+ * If any maximum-depth SRF is not at the top level of its expression,
+ * we'll need an extra Result node to compute the top-level scalar
+ * expression.
+ */
+ if (max_depth == context.current_depth && !IS_SRF_CALL(node))
+ need_extra_projection = true;
+ }
+
+ /*
+ * If we found no SRFs needing evaluation (maybe they were all present in
+ * input_target, or maybe they were all removed by const-simplification),
+ * then no ProjectSet is needed; fall out.
+ */
+ if (max_depth == 0)
+ {
+ *targets = list_make1(target);
+ *targets_contain_srfs = list_make1_int(false);
+ return;
+ }
+
+ /*
+ * The Vars and SRF outputs needed at top level can be added to the last
+ * level_input lists if we don't need an extra projection step. If we do
+ * need one, add a SRF-free level to the lists.
+ */
+ if (need_extra_projection)
+ {
+ context.level_srfs = lappend(context.level_srfs, NIL);
+ context.level_input_vars = lappend(context.level_input_vars,
+ context.current_input_vars);
+ context.level_input_srfs = lappend(context.level_input_srfs,
+ context.current_input_srfs);
+ }
+ else
+ {
+ lc = list_nth_cell(context.level_input_vars, max_depth);
+ lfirst(lc) = list_concat(lfirst(lc), context.current_input_vars);
+ lc = list_nth_cell(context.level_input_srfs, max_depth);
+ lfirst(lc) = list_concat(lfirst(lc), context.current_input_srfs);
+ }
+
+ /*
+ * Now construct the output PathTargets. The original target can be used
+ * as-is for the last one, but we need to construct a new SRF-free target
+ * representing what the preceding plan node has to emit, as well as a
+ * target for each intermediate ProjectSet node.
+ */
+ *targets = *targets_contain_srfs = NIL;
+ prev_level_tlist = NIL;
+
+ forthree(lc1, context.level_srfs,
+ lc2, context.level_input_vars,
+ lc3, context.level_input_srfs)
+ {
+ List *level_srfs = (List *) lfirst(lc1);
+ PathTarget *ntarget;
+
+ if (lnext(context.level_srfs, lc1) == NULL)
+ {
+ ntarget = target;
+ }
+ else
+ {
+ ntarget = create_empty_pathtarget();
+
+ /*
+ * This target should actually evaluate any SRFs of the current
+ * level, and it needs to propagate forward any Vars needed by
+ * later levels, as well as SRFs computed earlier and needed by
+ * later levels.
+ */
+ add_sp_items_to_pathtarget(ntarget, level_srfs);
+ for_each_cell(lc, context.level_input_vars,
+ lnext(context.level_input_vars, lc2))
+ {
+ List *input_vars = (List *) lfirst(lc);
+
+ add_sp_items_to_pathtarget(ntarget, input_vars);
+ }
+ for_each_cell(lc, context.level_input_srfs,
+ lnext(context.level_input_srfs, lc3))
+ {
+ List *input_srfs = (List *) lfirst(lc);
+ ListCell *lcx;
+
+ foreach(lcx, input_srfs)
+ {
+ split_pathtarget_item *item = lfirst(lcx);
+
+ if (list_member(prev_level_tlist, item->expr))
+ add_sp_item_to_pathtarget(ntarget, item);
+ }
+ }
+ set_pathtarget_cost_width(root, ntarget);
+ }
+
+ /*
+ * Add current target and does-it-compute-SRFs flag to output lists.
+ */
+ *targets = lappend(*targets, ntarget);
+ *targets_contain_srfs = lappend_int(*targets_contain_srfs,
+ (level_srfs != NIL));
+
+ /* Remember this level's output for next pass */
+ prev_level_tlist = ntarget->exprs;
+ }
+}
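+
+/*
+ * Usage sketch (modelled on how planner.c consumes this function's output;
+ * the wrapper name and the "rel"/"subpath" parameters are assumptions for
+ * illustration): the two parallel lists are walked bottom-up, stacking a
+ * ProjectSet level wherever the flag says the corresponding PathTarget
+ * computes SRFs, and a plain projection otherwise.
+ */
+#ifdef NOT_USED
+static Path *
+example_stack_srf_projections(PlannerInfo *root, RelOptInfo *rel,
+							  Path *subpath, PathTarget *final_target)
+{
+	List	   *targets;
+	List	   *targets_contain_srfs;
+	ListCell   *lc1,
+			   *lc2;
+
+	split_pathtarget_at_srfs(root, final_target, subpath->pathtarget,
+							 &targets, &targets_contain_srfs);
+
+	forboth(lc1, targets, lc2, targets_contain_srfs)
+	{
+		PathTarget *thistarget = lfirst_node(PathTarget, lc1);
+		bool		contains_srfs = (bool) lfirst_int(lc2);
+
+		if (contains_srfs)
+			subpath = (Path *) create_set_projection_path(root, rel,
+														  subpath, thistarget);
+		else
+			subpath = (Path *) create_projection_path(root, rel,
+													  subpath, thistarget);
+	}
+	return subpath;
+}
+#endif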
+
+/*
+ * Recursively examine expressions for split_pathtarget_at_srfs.
+ *
+ * Note we make no effort here to prevent duplicate entries in the output
+ * lists. Duplicates will be gotten rid of later.
+ */
+static bool
+split_pathtarget_walker(Node *node, split_pathtarget_context *context)
+{
+ if (node == NULL)
+ return false;
+
+ /*
+ * A subexpression that matches an expression already computed in
+ * input_target can be treated like a Var (which indeed it will be after
+ * setrefs.c gets done with it), even if it's actually a SRF. Record it
+ * as being needed for the current expression, and ignore any
+ * substructure. (Note in particular that this preserves the identity of
+ * any expressions that appear as sortgrouprefs in input_target.)
+ */
+ if (list_member(context->input_target_exprs, node))
+ {
+ split_pathtarget_item *item = palloc(sizeof(split_pathtarget_item));
+
+ item->expr = node;
+ item->sortgroupref = context->current_sgref;
+ context->current_input_vars = lappend(context->current_input_vars,
+ item);
+ return false;
+ }
+
+ /*
+ * Vars and Var-like constructs are expected to be gotten from the input,
+ * too. We assume that these constructs cannot contain any SRFs (if one
+ * does, there will be an executor failure from a misplaced SRF).
+ */
+ if (IsA(node, Var) ||
+ IsA(node, PlaceHolderVar) ||
+ IsA(node, Aggref) ||
+ IsA(node, GroupingFunc) ||
+ IsA(node, WindowFunc))
+ {
+ split_pathtarget_item *item = palloc(sizeof(split_pathtarget_item));
+
+ item->expr = node;
+ item->sortgroupref = context->current_sgref;
+ context->current_input_vars = lappend(context->current_input_vars,
+ item);
+ return false;
+ }
+
+ /*
+ * If it's a SRF, recursively examine its inputs, determine its level, and
+ * make appropriate entries in the output lists.
+ */
+ if (IS_SRF_CALL(node))
+ {
+ split_pathtarget_item *item = palloc(sizeof(split_pathtarget_item));
+ List *save_input_vars = context->current_input_vars;
+ List *save_input_srfs = context->current_input_srfs;
+ int save_current_depth = context->current_depth;
+ int srf_depth;
+ ListCell *lc;
+
+ item->expr = node;
+ item->sortgroupref = context->current_sgref;
+
+ context->current_input_vars = NIL;
+ context->current_input_srfs = NIL;
+ context->current_depth = 0;
+ context->current_sgref = 0; /* subexpressions are not sortgroup items */
+
+ (void) expression_tree_walker(node, split_pathtarget_walker,
+ (void *) context);
+
+ /* Depth is one more than any SRF below it */
+ srf_depth = context->current_depth + 1;
+
+ /* If new record depth, initialize another level of output lists */
+ if (srf_depth >= list_length(context->level_srfs))
+ {
+ context->level_srfs = lappend(context->level_srfs, NIL);
+ context->level_input_vars = lappend(context->level_input_vars, NIL);
+ context->level_input_srfs = lappend(context->level_input_srfs, NIL);
+ }
+
+ /* Record this SRF as needing to be evaluated at appropriate level */
+ lc = list_nth_cell(context->level_srfs, srf_depth);
+ lfirst(lc) = lappend(lfirst(lc), item);
+
+ /* Record its inputs as being needed at the same level */
+ lc = list_nth_cell(context->level_input_vars, srf_depth);
+ lfirst(lc) = list_concat(lfirst(lc), context->current_input_vars);
+ lc = list_nth_cell(context->level_input_srfs, srf_depth);
+ lfirst(lc) = list_concat(lfirst(lc), context->current_input_srfs);
+
+ /*
+ * Restore caller-level state and update it for presence of this SRF.
+ * Notice we report the SRF itself as being needed for evaluation of
+		 * the surrounding expression.
+ */
+ context->current_input_vars = save_input_vars;
+ context->current_input_srfs = lappend(save_input_srfs, item);
+ context->current_depth = Max(save_current_depth, srf_depth);
+
+ /* We're done here */
+ return false;
+ }
+
+ /*
+ * Otherwise, the node is a scalar (non-set) expression, so recurse to
+ * examine its inputs.
+ */
+ context->current_sgref = 0; /* subexpressions are not sortgroup items */
+ return expression_tree_walker(node, split_pathtarget_walker,
+ (void *) context);
+}
+
+/*
+ * Add a split_pathtarget_item to the PathTarget, unless a matching item is
+ * already present. This is like add_new_column_to_pathtarget, but allows
+ * for sortgrouprefs to be handled. An item having zero sortgroupref can
+ * be merged with one that has a sortgroupref, acquiring the latter's
+ * sortgroupref.
+ *
+ * Note that we don't worry about possibly adding duplicate sortgrouprefs
+ * to the PathTarget. That would be bad, but it should be impossible unless
+ * the target passed to split_pathtarget_at_srfs already had duplicates.
+ * As long as it didn't, we can have at most one split_pathtarget_item with
+ * any particular nonzero sortgroupref.
+ */
+static void
+add_sp_item_to_pathtarget(PathTarget *target, split_pathtarget_item *item)
+{
+ int lci;
+ ListCell *lc;
+
+ /*
+ * Look for a pre-existing entry that is equal() and does not have a
+ * conflicting sortgroupref already.
+ */
+ lci = 0;
+ foreach(lc, target->exprs)
+ {
+ Node *node = (Node *) lfirst(lc);
+ Index sgref = get_pathtarget_sortgroupref(target, lci);
+
+ if ((item->sortgroupref == sgref ||
+ item->sortgroupref == 0 ||
+ sgref == 0) &&
+ equal(item->expr, node))
+ {
+ /* Found a match. Assign item's sortgroupref if it has one. */
+ if (item->sortgroupref)
+ {
+ if (target->sortgrouprefs == NULL)
+ {
+ target->sortgrouprefs = (Index *)
+ palloc0(list_length(target->exprs) * sizeof(Index));
+ }
+ target->sortgrouprefs[lci] = item->sortgroupref;
+ }
+ return;
+ }
+ lci++;
+ }
+
+ /*
+ * No match, so add item to PathTarget. Copy the expr for safety.
+ */
+ add_column_to_pathtarget(target, (Expr *) copyObject(item->expr),
+ item->sortgroupref);
+}
+
+/*
+ * Apply add_sp_item_to_pathtarget to each element of list.
+ */
+static void
+add_sp_items_to_pathtarget(PathTarget *target, List *items)
+{
+ ListCell *lc;
+
+ foreach(lc, items)
+ {
+ split_pathtarget_item *item = lfirst(lc);
+
+ add_sp_item_to_pathtarget(target, item);
+ }
+}
diff --git a/src/backend/optimizer/util/var.c b/src/backend/optimizer/util/var.c
new file mode 100644
index 0000000..ebc6ce8
--- /dev/null
+++ b/src/backend/optimizer/util/var.c
@@ -0,0 +1,903 @@
+/*-------------------------------------------------------------------------
+ *
+ * var.c
+ * Var node manipulation routines
+ *
+ * Note: for most purposes, PlaceHolderVar is considered a Var too,
+ * even if its contained expression is variable-free. Also, CurrentOfExpr
+ * is treated as a Var for purposes of determining whether an expression
+ * contains variables.
+ *
+ *
+ * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ * src/backend/optimizer/util/var.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "access/sysattr.h"
+#include "nodes/nodeFuncs.h"
+#include "optimizer/optimizer.h"
+#include "optimizer/placeholder.h"
+#include "optimizer/prep.h"
+#include "parser/parsetree.h"
+#include "rewrite/rewriteManip.h"
+
+
+typedef struct
+{
+ Relids varnos;
+ PlannerInfo *root;
+ int sublevels_up;
+} pull_varnos_context;
+
+typedef struct
+{
+ Bitmapset *varattnos;
+ Index varno;
+} pull_varattnos_context;
+
+typedef struct
+{
+ List *vars;
+ int sublevels_up;
+} pull_vars_context;
+
+typedef struct
+{
+ int var_location;
+ int sublevels_up;
+} locate_var_of_level_context;
+
+typedef struct
+{
+ List *varlist;
+ int flags;
+} pull_var_clause_context;
+
+typedef struct
+{
+ Query *query; /* outer Query */
+ int sublevels_up;
+ bool possible_sublink; /* could aliases include a SubLink? */
+ bool inserted_sublink; /* have we inserted a SubLink? */
+} flatten_join_alias_vars_context;
+
+static bool pull_varnos_walker(Node *node,
+ pull_varnos_context *context);
+static bool pull_varattnos_walker(Node *node, pull_varattnos_context *context);
+static bool pull_vars_walker(Node *node, pull_vars_context *context);
+static bool contain_var_clause_walker(Node *node, void *context);
+static bool contain_vars_of_level_walker(Node *node, int *sublevels_up);
+static bool locate_var_of_level_walker(Node *node,
+ locate_var_of_level_context *context);
+static bool pull_var_clause_walker(Node *node,
+ pull_var_clause_context *context);
+static Node *flatten_join_alias_vars_mutator(Node *node,
+ flatten_join_alias_vars_context *context);
+static Relids alias_relid_set(Query *query, Relids relids);
+
+
+/*
+ * pull_varnos
+ * Create a set of all the distinct varnos present in a parsetree.
+ * Only varnos that reference level-zero rtable entries are considered.
+ *
+ * "root" can be passed as NULL if it is not necessary to process
+ * PlaceHolderVars.
+ *
+ * NOTE: this is used on not-yet-planned expressions. It may therefore find
+ * bare SubLinks, and if so it needs to recurse into them to look for uplevel
+ * references to the desired rtable level! But when we find a completed
+ * SubPlan, we only need to look at the parameters passed to the subplan.
+ */
+Relids
+pull_varnos(PlannerInfo *root, Node *node)
+{
+ pull_varnos_context context;
+
+ context.varnos = NULL;
+ context.root = root;
+ context.sublevels_up = 0;
+
+ /*
+ * Must be prepared to start with a Query or a bare expression tree; if
+ * it's a Query, we don't want to increment sublevels_up.
+ */
+ query_or_expression_tree_walker(node,
+ pull_varnos_walker,
+ (void *) &context,
+ 0);
+
+ return context.varnos;
+}
+
+/*
+ * pull_varnos_of_level
+ * Create a set of all the distinct varnos present in a parsetree.
+ * Only Vars of the specified level are considered.
+ */
+Relids
+pull_varnos_of_level(PlannerInfo *root, Node *node, int levelsup)
+{
+ pull_varnos_context context;
+
+ context.varnos = NULL;
+ context.root = root;
+ context.sublevels_up = levelsup;
+
+ /*
+ * Must be prepared to start with a Query or a bare expression tree; if
+ * it's a Query, we don't want to increment sublevels_up.
+ */
+ query_or_expression_tree_walker(node,
+ pull_varnos_walker,
+ (void *) &context,
+ 0);
+
+ return context.varnos;
+}
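+
+/*
+ * Usage sketch (the helper name is an assumption, for illustration only):
+ * a common pattern is to collect the varnos referenced by a qual clause
+ * and test them against a relid set with the bitmapset primitives.
+ */
+#ifdef NOT_USED
+static bool
+example_qual_refers_only_to(PlannerInfo *root, Node *qual, Relids relids)
+{
+	Relids		varnos = pull_varnos(root, qual);
+
+	/* true if every level-zero var in the qual comes from "relids" */
+	return bms_is_subset(varnos, relids);
+}
+#endif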
+
+static bool
+pull_varnos_walker(Node *node, pull_varnos_context *context)
+{
+ if (node == NULL)
+ return false;
+ if (IsA(node, Var))
+ {
+ Var *var = (Var *) node;
+
+ if (var->varlevelsup == context->sublevels_up)
+ context->varnos = bms_add_member(context->varnos, var->varno);
+ return false;
+ }
+ if (IsA(node, CurrentOfExpr))
+ {
+ CurrentOfExpr *cexpr = (CurrentOfExpr *) node;
+
+ if (context->sublevels_up == 0)
+ context->varnos = bms_add_member(context->varnos, cexpr->cvarno);
+ return false;
+ }
+ if (IsA(node, PlaceHolderVar))
+ {
+ PlaceHolderVar *phv = (PlaceHolderVar *) node;
+
+ /*
+ * If a PlaceHolderVar is not of the target query level, ignore it,
+ * instead recursing into its expression to see if it contains any
+ * vars that are of the target level. We'll also do that when the
+ * caller doesn't pass a "root" pointer. (We probably shouldn't see
+ * PlaceHolderVars at all in such cases, but if we do, this is a
+ * reasonable behavior.)
+ */
+ if (phv->phlevelsup == context->sublevels_up &&
+ context->root != NULL)
+ {
+ /*
+ * Ideally, the PHV's contribution to context->varnos is its
+ * ph_eval_at set. However, this code can be invoked before
+ * that's been computed. If we cannot find a PlaceHolderInfo,
+ * fall back to the conservative assumption that the PHV will be
+ * evaluated at its syntactic level (phv->phrels).
+ *
+ * There is a second hazard: this code is also used to examine
+ * qual clauses during deconstruct_jointree, when we may have a
+ * PlaceHolderInfo but its ph_eval_at value is not yet final, so
+ * that theoretically we could obtain a relid set that's smaller
+ * than we'd see later on. That should never happen though,
+ * because we deconstruct the jointree working upwards. Any outer
+ * join that forces delay of evaluation of a given qual clause
+ * will be processed before we examine that clause here, so the
+ * ph_eval_at value should have been updated to include it.
+ *
+ * Another problem is that a PlaceHolderVar can appear in quals or
+ * tlists that have been translated for use in a child appendrel.
+ * Typically such a PHV is a parameter expression sourced by some
+ * other relation, so that the translation from parent appendrel
+ * to child doesn't change its phrels, and we should still take
+ * ph_eval_at at face value. But in corner cases, the PHV's
+ * original phrels can include the parent appendrel itself, in
+ * which case the translated PHV will have the child appendrel in
+ * phrels, and we must translate ph_eval_at to match.
+ */
+ PlaceHolderInfo *phinfo = NULL;
+
+ if (phv->phlevelsup == 0)
+ {
+ ListCell *lc;
+
+ foreach(lc, context->root->placeholder_list)
+ {
+ phinfo = (PlaceHolderInfo *) lfirst(lc);
+ if (phinfo->phid == phv->phid)
+ break;
+ phinfo = NULL;
+ }
+ }
+ if (phinfo == NULL)
+ {
+ /* No PlaceHolderInfo yet, use phrels */
+ context->varnos = bms_add_members(context->varnos,
+ phv->phrels);
+ }
+ else if (bms_equal(phv->phrels, phinfo->ph_var->phrels))
+ {
+ /* Normal case: use ph_eval_at */
+ context->varnos = bms_add_members(context->varnos,
+ phinfo->ph_eval_at);
+ }
+ else
+ {
+ /* Translated PlaceHolderVar: translate ph_eval_at to match */
+ Relids newevalat,
+ delta;
+
+ /* remove what was removed from phv->phrels ... */
+ delta = bms_difference(phinfo->ph_var->phrels, phv->phrels);
+ newevalat = bms_difference(phinfo->ph_eval_at, delta);
+ /* ... then if that was in fact part of ph_eval_at ... */
+ if (!bms_equal(newevalat, phinfo->ph_eval_at))
+ {
+ /* ... add what was added */
+ delta = bms_difference(phv->phrels, phinfo->ph_var->phrels);
+ newevalat = bms_join(newevalat, delta);
+ }
+ context->varnos = bms_join(context->varnos,
+ newevalat);
+ }
+ return false; /* don't recurse into expression */
+ }
+ }
+ else if (IsA(node, Query))
+ {
+ /* Recurse into RTE subquery or not-yet-planned sublink subquery */
+ bool result;
+
+ context->sublevels_up++;
+ result = query_tree_walker((Query *) node, pull_varnos_walker,
+ (void *) context, 0);
+ context->sublevels_up--;
+ return result;
+ }
+ return expression_tree_walker(node, pull_varnos_walker,
+ (void *) context);
+}
+
+
+/*
+ * pull_varattnos
+ * Find all the distinct attribute numbers present in an expression tree,
+ * and add them to the initial contents of *varattnos.
+ * Only Vars of the given varno and rtable level zero are considered.
+ *
+ * Attribute numbers are offset by FirstLowInvalidHeapAttributeNumber so that
+ * we can include system attributes (e.g., OID) in the bitmap representation.
+ *
+ * Currently, this does not support unplanned subqueries; that is not needed
+ * for current uses. It will handle already-planned SubPlan nodes, though,
+ * looking into only the "testexpr" and the "args" list. (The subplan cannot
+ * contain any other references to Vars of the current level.)
+ */
+void
+pull_varattnos(Node *node, Index varno, Bitmapset **varattnos)
+{
+ pull_varattnos_context context;
+
+ context.varattnos = *varattnos;
+ context.varno = varno;
+
+ (void) pull_varattnos_walker(node, &context);
+
+ *varattnos = context.varattnos;
+}
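+
+/*
+ * Usage sketch (illustration only; the helper name is assumed): because
+ * the collected attribute numbers are offset by
+ * FirstLowInvalidHeapAttributeNumber, membership tests must apply the
+ * same offset.
+ */
+#ifdef NOT_USED
+static bool
+example_expr_uses_column(Node *expr, Index varno, AttrNumber attno)
+{
+	Bitmapset  *attrs = NULL;
+
+	pull_varattnos(expr, varno, &attrs);
+	return bms_is_member(attno - FirstLowInvalidHeapAttributeNumber, attrs);
+}
+#endif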
+
+static bool
+pull_varattnos_walker(Node *node, pull_varattnos_context *context)
+{
+ if (node == NULL)
+ return false;
+ if (IsA(node, Var))
+ {
+ Var *var = (Var *) node;
+
+ if (var->varno == context->varno && var->varlevelsup == 0)
+ context->varattnos =
+ bms_add_member(context->varattnos,
+ var->varattno - FirstLowInvalidHeapAttributeNumber);
+ return false;
+ }
+
+ /* Should not find an unplanned subquery */
+ Assert(!IsA(node, Query));
+
+ return expression_tree_walker(node, pull_varattnos_walker,
+ (void *) context);
+}
+
+
+/*
+ * pull_vars_of_level
+ * Create a list of all Vars (and PlaceHolderVars) referencing the
+ * specified query level in the given parsetree.
+ *
+ * Caution: the Vars are not copied, only linked into the list.
+ */
+List *
+pull_vars_of_level(Node *node, int levelsup)
+{
+ pull_vars_context context;
+
+ context.vars = NIL;
+ context.sublevels_up = levelsup;
+
+ /*
+ * Must be prepared to start with a Query or a bare expression tree; if
+ * it's a Query, we don't want to increment sublevels_up.
+ */
+ query_or_expression_tree_walker(node,
+ pull_vars_walker,
+ (void *) &context,
+ 0);
+
+ return context.vars;
+}
+
+static bool
+pull_vars_walker(Node *node, pull_vars_context *context)
+{
+ if (node == NULL)
+ return false;
+ if (IsA(node, Var))
+ {
+ Var *var = (Var *) node;
+
+ if (var->varlevelsup == context->sublevels_up)
+ context->vars = lappend(context->vars, var);
+ return false;
+ }
+ if (IsA(node, PlaceHolderVar))
+ {
+ PlaceHolderVar *phv = (PlaceHolderVar *) node;
+
+ if (phv->phlevelsup == context->sublevels_up)
+ context->vars = lappend(context->vars, phv);
+ /* we don't want to look into the contained expression */
+ return false;
+ }
+ if (IsA(node, Query))
+ {
+ /* Recurse into RTE subquery or not-yet-planned sublink subquery */
+ bool result;
+
+ context->sublevels_up++;
+ result = query_tree_walker((Query *) node, pull_vars_walker,
+ (void *) context, 0);
+ context->sublevels_up--;
+ return result;
+ }
+ return expression_tree_walker(node, pull_vars_walker,
+ (void *) context);
+}
+
+
+/*
+ * contain_var_clause
+ * Recursively scan a clause to discover whether it contains any Var nodes
+ * (of the current query level).
+ *
+ * Returns true if any varnode found.
+ *
+ * Does not examine subqueries, therefore must only be used after reduction
+ * of sublinks to subplans!
+ */
+bool
+contain_var_clause(Node *node)
+{
+ return contain_var_clause_walker(node, NULL);
+}
+
+static bool
+contain_var_clause_walker(Node *node, void *context)
+{
+ if (node == NULL)
+ return false;
+ if (IsA(node, Var))
+ {
+ if (((Var *) node)->varlevelsup == 0)
+ return true; /* abort the tree traversal and return true */
+ return false;
+ }
+ if (IsA(node, CurrentOfExpr))
+ return true;
+ if (IsA(node, PlaceHolderVar))
+ {
+ if (((PlaceHolderVar *) node)->phlevelsup == 0)
+ return true; /* abort the tree traversal and return true */
+ /* else fall through to check the contained expr */
+ }
+ return expression_tree_walker(node, contain_var_clause_walker, context);
+}
+
+
+/*
+ * contain_vars_of_level
+ * Recursively scan a clause to discover whether it contains any Var nodes
+ * of the specified query level.
+ *
+ * Returns true if any such Var found.
+ *
+ * Will recurse into sublinks. Also, may be invoked directly on a Query.
+ */
+bool
+contain_vars_of_level(Node *node, int levelsup)
+{
+ int sublevels_up = levelsup;
+
+ return query_or_expression_tree_walker(node,
+ contain_vars_of_level_walker,
+ (void *) &sublevels_up,
+ 0);
+}
+
+static bool
+contain_vars_of_level_walker(Node *node, int *sublevels_up)
+{
+ if (node == NULL)
+ return false;
+ if (IsA(node, Var))
+ {
+ if (((Var *) node)->varlevelsup == *sublevels_up)
+ return true; /* abort tree traversal and return true */
+ return false;
+ }
+ if (IsA(node, CurrentOfExpr))
+ {
+ if (*sublevels_up == 0)
+ return true;
+ return false;
+ }
+ if (IsA(node, PlaceHolderVar))
+ {
+ if (((PlaceHolderVar *) node)->phlevelsup == *sublevels_up)
+ return true; /* abort the tree traversal and return true */
+ /* else fall through to check the contained expr */
+ }
+ if (IsA(node, Query))
+ {
+ /* Recurse into subselects */
+ bool result;
+
+ (*sublevels_up)++;
+ result = query_tree_walker((Query *) node,
+ contain_vars_of_level_walker,
+ (void *) sublevels_up,
+ 0);
+ (*sublevels_up)--;
+ return result;
+ }
+ return expression_tree_walker(node,
+ contain_vars_of_level_walker,
+ (void *) sublevels_up);
+}
+
+
+/*
+ * locate_var_of_level
+ * Find the parse location of any Var of the specified query level.
+ *
+ * Returns -1 if no such Var is in the querytree, or if they all have
+ * unknown parse location. (The former case is probably caller error,
+ * but we don't bother to distinguish it from the latter case.)
+ *
+ * Will recurse into sublinks. Also, may be invoked directly on a Query.
+ *
+ * Note: it might seem appropriate to merge this functionality into
+ * contain_vars_of_level, but that would complicate that function's API.
+ * Currently, the only uses of this function are for error reporting,
+ * and so shaving cycles probably isn't very important.
+ */
+int
+locate_var_of_level(Node *node, int levelsup)
+{
+ locate_var_of_level_context context;
+
+ context.var_location = -1; /* in case we find nothing */
+ context.sublevels_up = levelsup;
+
+ (void) query_or_expression_tree_walker(node,
+ locate_var_of_level_walker,
+ (void *) &context,
+ 0);
+
+ return context.var_location;
+}
+
+static bool
+locate_var_of_level_walker(Node *node,
+ locate_var_of_level_context *context)
+{
+ if (node == NULL)
+ return false;
+ if (IsA(node, Var))
+ {
+ Var *var = (Var *) node;
+
+ if (var->varlevelsup == context->sublevels_up &&
+ var->location >= 0)
+ {
+ context->var_location = var->location;
+ return true; /* abort tree traversal and return true */
+ }
+ return false;
+ }
+ if (IsA(node, CurrentOfExpr))
+ {
+ /* since CurrentOfExpr doesn't carry location, nothing we can do */
+ return false;
+ }
+ /* No extra code needed for PlaceHolderVar; just look in contained expr */
+ if (IsA(node, Query))
+ {
+ /* Recurse into subselects */
+ bool result;
+
+ context->sublevels_up++;
+ result = query_tree_walker((Query *) node,
+ locate_var_of_level_walker,
+ (void *) context,
+ 0);
+ context->sublevels_up--;
+ return result;
+ }
+ return expression_tree_walker(node,
+ locate_var_of_level_walker,
+ (void *) context);
+}
+
+
+/*
+ * pull_var_clause
+ * Recursively pulls all Var nodes from an expression clause.
+ *
+ * Aggrefs are handled according to these bits in 'flags':
+ * PVC_INCLUDE_AGGREGATES include Aggrefs in output list
+ * PVC_RECURSE_AGGREGATES recurse into Aggref arguments
+ * neither flag throw error if Aggref found
+ * Vars within an Aggref's expression are included in the result only
+ * when PVC_RECURSE_AGGREGATES is specified.
+ *
+ * WindowFuncs are handled according to these bits in 'flags':
+ * PVC_INCLUDE_WINDOWFUNCS include WindowFuncs in output list
+ * PVC_RECURSE_WINDOWFUNCS recurse into WindowFunc arguments
+ * neither flag throw error if WindowFunc found
+ * Vars within a WindowFunc's expression are included in the result only
+ * when PVC_RECURSE_WINDOWFUNCS is specified.
+ *
+ * PlaceHolderVars are handled according to these bits in 'flags':
+ * PVC_INCLUDE_PLACEHOLDERS include PlaceHolderVars in output list
+ * PVC_RECURSE_PLACEHOLDERS recurse into PlaceHolderVar arguments
+ * neither flag throw error if PlaceHolderVar found
+ * Vars within a PHV's expression are included in the result only
+ * when PVC_RECURSE_PLACEHOLDERS is specified.
+ *
+ * GroupingFuncs are treated exactly like Aggrefs, and so do not need
+ * their own flag bits.
+ *
+ * CurrentOfExpr nodes are ignored in all cases.
+ *
+ * Upper-level vars (with varlevelsup > 0) should not be seen here,
+ * likewise for upper-level Aggrefs and PlaceHolderVars.
+ *
+ * Returns list of nodes found. Note the nodes themselves are not
+ * copied, only referenced.
+ *
+ * Does not examine subqueries, therefore must only be used after reduction
+ * of sublinks to subplans!
+ */
+List *
+pull_var_clause(Node *node, int flags)
+{
+ pull_var_clause_context context;
+
+ /* Assert that caller has not specified inconsistent flags */
+ Assert((flags & (PVC_INCLUDE_AGGREGATES | PVC_RECURSE_AGGREGATES))
+ != (PVC_INCLUDE_AGGREGATES | PVC_RECURSE_AGGREGATES));
+ Assert((flags & (PVC_INCLUDE_WINDOWFUNCS | PVC_RECURSE_WINDOWFUNCS))
+ != (PVC_INCLUDE_WINDOWFUNCS | PVC_RECURSE_WINDOWFUNCS));
+ Assert((flags & (PVC_INCLUDE_PLACEHOLDERS | PVC_RECURSE_PLACEHOLDERS))
+ != (PVC_INCLUDE_PLACEHOLDERS | PVC_RECURSE_PLACEHOLDERS));
+
+ context.varlist = NIL;
+ context.flags = flags;
+
+ pull_var_clause_walker(node, &context);
+ return context.varlist;
+}
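+
+/*
+ * Usage sketch (illustration only; the helper name is assumed): a typical
+ * call recurses into aggregates and window functions so that only plain
+ * Vars (plus any PlaceHolderVars, kept whole) come back.
+ */
+#ifdef NOT_USED
+static List *
+example_collect_vars(Node *expr)
+{
+	return pull_var_clause(expr,
+						   PVC_RECURSE_AGGREGATES |
+						   PVC_RECURSE_WINDOWFUNCS |
+						   PVC_INCLUDE_PLACEHOLDERS);
+}
+#endif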
+
+static bool
+pull_var_clause_walker(Node *node, pull_var_clause_context *context)
+{
+ if (node == NULL)
+ return false;
+ if (IsA(node, Var))
+ {
+ if (((Var *) node)->varlevelsup != 0)
+ elog(ERROR, "Upper-level Var found where not expected");
+ context->varlist = lappend(context->varlist, node);
+ return false;
+ }
+ else if (IsA(node, Aggref))
+ {
+ if (((Aggref *) node)->agglevelsup != 0)
+ elog(ERROR, "Upper-level Aggref found where not expected");
+ if (context->flags & PVC_INCLUDE_AGGREGATES)
+ {
+ context->varlist = lappend(context->varlist, node);
+ /* we do NOT descend into the contained expression */
+ return false;
+ }
+ else if (context->flags & PVC_RECURSE_AGGREGATES)
+ {
+ /* fall through to recurse into the aggregate's arguments */
+ }
+ else
+ elog(ERROR, "Aggref found where not expected");
+ }
+ else if (IsA(node, GroupingFunc))
+ {
+ if (((GroupingFunc *) node)->agglevelsup != 0)
+ elog(ERROR, "Upper-level GROUPING found where not expected");
+ if (context->flags & PVC_INCLUDE_AGGREGATES)
+ {
+ context->varlist = lappend(context->varlist, node);
+ /* we do NOT descend into the contained expression */
+ return false;
+ }
+ else if (context->flags & PVC_RECURSE_AGGREGATES)
+ {
+ /* fall through to recurse into the GroupingFunc's arguments */
+ }
+ else
+ elog(ERROR, "GROUPING found where not expected");
+ }
+ else if (IsA(node, WindowFunc))
+ {
+ /* WindowFuncs have no levelsup field to check ... */
+ if (context->flags & PVC_INCLUDE_WINDOWFUNCS)
+ {
+ context->varlist = lappend(context->varlist, node);
+ /* we do NOT descend into the contained expressions */
+ return false;
+ }
+ else if (context->flags & PVC_RECURSE_WINDOWFUNCS)
+ {
+ /* fall through to recurse into the windowfunc's arguments */
+ }
+ else
+ elog(ERROR, "WindowFunc found where not expected");
+ }
+ else if (IsA(node, PlaceHolderVar))
+ {
+ if (((PlaceHolderVar *) node)->phlevelsup != 0)
+ elog(ERROR, "Upper-level PlaceHolderVar found where not expected");
+ if (context->flags & PVC_INCLUDE_PLACEHOLDERS)
+ {
+ context->varlist = lappend(context->varlist, node);
+ /* we do NOT descend into the contained expression */
+ return false;
+ }
+ else if (context->flags & PVC_RECURSE_PLACEHOLDERS)
+ {
+ /* fall through to recurse into the placeholder's expression */
+ }
+ else
+ elog(ERROR, "PlaceHolderVar found where not expected");
+ }
+ return expression_tree_walker(node, pull_var_clause_walker,
+ (void *) context);
+}
+
+
+/*
+ * flatten_join_alias_vars
+ * Replace Vars that reference JOIN outputs with references to the original
+ * relation variables instead. This allows quals involving such vars to be
+ * pushed down. Whole-row Vars that reference JOIN relations are expanded
+ * into RowExpr constructs that name the individual output Vars. This
+ * is necessary since we will not scan the JOIN as a base relation, which
+ * is the only way that the executor can directly handle whole-row Vars.
+ *
+ * This also adjusts relid sets found in some expression node types to
+ * substitute the contained base rels for any join relid.
+ *
+ * If a JOIN contains sub-selects that have been flattened, its join alias
+ * entries might now be arbitrary expressions, not just Vars. This affects
+ * this function in one important way: we might find ourselves inserting
+ * SubLink expressions into subqueries, and we must make sure that their
+ * Query.hasSubLinks fields get set to true if so. If there are any
+ * SubLinks in the join alias lists, the outer Query should already have
+ * hasSubLinks = true, so this is only relevant to un-flattened subqueries.
+ *
+ * NOTE: this is used on not-yet-planned expressions. We do not expect it
+ * to be applied directly to the whole Query, so if we see a Query to start
+ * with, we do want to increment sublevels_up (this occurs for LATERAL
+ * subqueries).
+ */
+Node *
+flatten_join_alias_vars(Query *query, Node *node)
+{
+ flatten_join_alias_vars_context context;
+
+ context.query = query;
+ context.sublevels_up = 0;
+ /* flag whether join aliases could possibly contain SubLinks */
+ context.possible_sublink = query->hasSubLinks;
+ /* if hasSubLinks is already true, no need to work hard */
+ context.inserted_sublink = query->hasSubLinks;
+
+ return flatten_join_alias_vars_mutator(node, &context);
+}
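+
+/*
+ * Usage sketch (the wrapper name is an assumption): a typical caller
+ * flattens an expression before computing relid sets from it, so that
+ * Vars that pointed at a JOIN RTE are rewritten to reference the
+ * underlying base relations; the flatten step can be skipped when the
+ * query has no JOIN RTEs at all.
+ */
+#ifdef NOT_USED
+static Node *
+example_flatten_qual(PlannerInfo *root, Node *qual)
+{
+	if (root->hasJoinRTEs)
+		qual = flatten_join_alias_vars(root->parse, qual);
+	return qual;
+}
+#endif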
+
+static Node *
+flatten_join_alias_vars_mutator(Node *node,
+ flatten_join_alias_vars_context *context)
+{
+ if (node == NULL)
+ return NULL;
+ if (IsA(node, Var))
+ {
+ Var *var = (Var *) node;
+ RangeTblEntry *rte;
+ Node *newvar;
+
+ /* No change unless Var belongs to a JOIN of the target level */
+ if (var->varlevelsup != context->sublevels_up)
+ return node; /* no need to copy, really */
+ rte = rt_fetch(var->varno, context->query->rtable);
+ if (rte->rtekind != RTE_JOIN)
+ return node;
+ if (var->varattno == InvalidAttrNumber)
+ {
+ /* Must expand whole-row reference */
+ RowExpr *rowexpr;
+ List *fields = NIL;
+ List *colnames = NIL;
+ ListCell *lv;
+ ListCell *ln;
+
+ Assert(list_length(rte->joinaliasvars) == list_length(rte->eref->colnames));
+ forboth(lv, rte->joinaliasvars, ln, rte->eref->colnames)
+ {
+ newvar = (Node *) lfirst(lv);
+ /* Ignore dropped columns */
+ if (newvar == NULL)
+ continue;
+ newvar = copyObject(newvar);
+
+ /*
+ * If we are expanding an alias carried down from an upper
+ * query, must adjust its varlevelsup fields.
+ */
+ if (context->sublevels_up != 0)
+ IncrementVarSublevelsUp(newvar, context->sublevels_up, 0);
+ /* Preserve original Var's location, if possible */
+ if (IsA(newvar, Var))
+ ((Var *) newvar)->location = var->location;
+ /* Recurse in case join input is itself a join */
+ /* (also takes care of setting inserted_sublink if needed) */
+ newvar = flatten_join_alias_vars_mutator(newvar, context);
+ fields = lappend(fields, newvar);
+ /* We need the names of non-dropped columns, too */
+ colnames = lappend(colnames, copyObject((Node *) lfirst(ln)));
+ }
+ rowexpr = makeNode(RowExpr);
+ rowexpr->args = fields;
+ rowexpr->row_typeid = var->vartype;
+ rowexpr->row_format = COERCE_IMPLICIT_CAST;
+ /* vartype will always be RECORDOID, so we always need colnames */
+ rowexpr->colnames = colnames;
+ rowexpr->location = var->location;
+
+ return (Node *) rowexpr;
+ }
+
+ /* Expand join alias reference */
+ Assert(var->varattno > 0);
+ newvar = (Node *) list_nth(rte->joinaliasvars, var->varattno - 1);
+ Assert(newvar != NULL);
+ newvar = copyObject(newvar);
+
+ /*
+ * If we are expanding an alias carried down from an upper query, must
+ * adjust its varlevelsup fields.
+ */
+ if (context->sublevels_up != 0)
+ IncrementVarSublevelsUp(newvar, context->sublevels_up, 0);
+
+ /* Preserve original Var's location, if possible */
+ if (IsA(newvar, Var))
+ ((Var *) newvar)->location = var->location;
+
+ /* Recurse in case join input is itself a join */
+ newvar = flatten_join_alias_vars_mutator(newvar, context);
+
+ /* Detect if we are adding a sublink to query */
+ if (context->possible_sublink && !context->inserted_sublink)
+ context->inserted_sublink = checkExprHasSubLink(newvar);
+
+ return newvar;
+ }
+ if (IsA(node, PlaceHolderVar))
+ {
+ /* Copy the PlaceHolderVar node with correct mutation of subnodes */
+ PlaceHolderVar *phv;
+
+ phv = (PlaceHolderVar *) expression_tree_mutator(node,
+ flatten_join_alias_vars_mutator,
+ (void *) context);
+ /* now fix PlaceHolderVar's relid sets */
+ if (phv->phlevelsup == context->sublevels_up)
+ {
+ phv->phrels = alias_relid_set(context->query,
+ phv->phrels);
+ }
+ return (Node *) phv;
+ }
+
+ if (IsA(node, Query))
+ {
+ /* Recurse into RTE subquery or not-yet-planned sublink subquery */
+ Query *newnode;
+ bool save_inserted_sublink;
+
+ context->sublevels_up++;
+ save_inserted_sublink = context->inserted_sublink;
+ context->inserted_sublink = ((Query *) node)->hasSubLinks;
+ newnode = query_tree_mutator((Query *) node,
+ flatten_join_alias_vars_mutator,
+ (void *) context,
+ QTW_IGNORE_JOINALIASES);
+ newnode->hasSubLinks |= context->inserted_sublink;
+ context->inserted_sublink = save_inserted_sublink;
+ context->sublevels_up--;
+ return (Node *) newnode;
+ }
+ /* Already-planned tree not supported */
+ Assert(!IsA(node, SubPlan));
+ /* Shouldn't need to handle these planner auxiliary nodes here */
+ Assert(!IsA(node, SpecialJoinInfo));
+ Assert(!IsA(node, PlaceHolderInfo));
+ Assert(!IsA(node, MinMaxAggInfo));
+
+ return expression_tree_mutator(node, flatten_join_alias_vars_mutator,
+ (void *) context);
+}
+
+/*
+ * alias_relid_set: in a set of RT indexes, replace joins by their
+ * underlying base relids
+ */
+static Relids
+alias_relid_set(Query *query, Relids relids)
+{
+ Relids result = NULL;
+ int rtindex;
+
+ rtindex = -1;
+ while ((rtindex = bms_next_member(relids, rtindex)) >= 0)
+ {
+ RangeTblEntry *rte = rt_fetch(rtindex, query->rtable);
+
+ if (rte->rtekind == RTE_JOIN)
+ result = bms_join(result, get_relids_for_join(query, rtindex));
+ else
+ result = bms_add_member(result, rtindex);
+ }
+ return result;
+}