1 /*-------------------------------------------------------------------------
4 * Routines copied from PostgreSQL core distribution.
6 * src/backend/optimizer/path/allpaths.c
7 * set_append_rel_pathlist()
8 * accumulate_append_subpath()
9 * set_dummy_rel_pathlist()
10 * standard_join_search()
12 * src/backend/optimizer/path/joinrels.c
13 * join_search_one_level()
14 * make_rels_by_clause_joins()
15 * make_rels_by_clauseless_joins()
17 * has_join_restriction()
20 * restriction_is_constant_false()
22 * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
23 * Portions Copyright (c) 1994, Regents of the University of California
25 *-------------------------------------------------------------------------
/*
 * NOTE(review): copied from PostgreSQL core (allpaths.c).  This excerpt is
 * truncated -- the interleaved original line numbers jump, so braces, local
 * declarations and several statements are missing from view.  Documented
 * as-is; do not treat this text as compilable.  Resync against the upstream
 * file before editing.
 */
29 * set_append_rel_pathlist
30 * Build access paths for an "append relation"
32 * The passed-in rel and RTE represent the entire append relation. The
33 * relation's contents are computed by appending together the output of
34 * the individual member relations. Note that in the inheritance case,
35 * the first member relation is actually the same table as is mentioned in
36 * the parent RTE ... but it has a different RTE and RelOptInfo. This is
37 * a good thing because their outputs are not the same size.
40 set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
41 Index rti, RangeTblEntry *rte)
43 int parentRTindex = rti;
44 List *live_childrels = NIL;
46 List *all_child_pathkeys = NIL;
49 double *parent_attrsizes;
54 * Initialize to compute size estimates for whole append relation.
56 * We handle width estimates by weighting the widths of different child
57 * rels proportionally to their number of rows. This is sensible because
58 * the use of width estimates is mainly to compute the total relation
59 * "footprint" if we have to sort or hash it. To do this, we sum the
60 * total equivalent size (in "double" arithmetic) and then divide by the
61 * total rowcount estimate. This is done separately for the total rel
62 * width and each attribute.
64 * Note: if you consider changing this logic, beware that child rels could
65 * have zero rows and/or width, if they were excluded by constraints.
/* One attr-width accumulator slot per parent column (min_attr..max_attr). */
69 nattrs = rel->max_attr - rel->min_attr + 1;
70 parent_attrsizes = (double *) palloc0(nattrs * sizeof(double));
73 * Generate access paths for each member relation, and pick the cheapest
76 foreach(l, root->append_rel_list)
78 AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(l);
80 RangeTblEntry *childRTE;
88 /* append_rel_list contains all append rels; ignore others */
89 if (appinfo->parent_relid != parentRTindex)
92 childRTindex = appinfo->child_relid;
93 childRTE = root->simple_rte_array[childRTindex];
96 * The child rel's RelOptInfo was already created during
97 * add_base_rels_to_query.
99 childrel = find_base_rel(root, childRTindex);
100 Assert(childrel->reloptkind == RELOPT_OTHER_MEMBER_REL);
103 * We have to copy the parent's targetlist and quals to the child,
104 * with appropriate substitution of variables. However, only the
105 * baserestrictinfo quals are needed before we can check for
106 * constraint exclusion; so do that first and then check to see if we
107 * can disregard this child.
109 * As of 8.4, the child rel's targetlist might contain non-Var
110 * expressions, which means that substitution into the quals could
111 * produce opportunities for const-simplification, and perhaps even
112 * pseudoconstant quals. To deal with this, we strip the RestrictInfo
113 * nodes, do the substitution, do const-simplification, and then
114 * reconstitute the RestrictInfo layer.
116 childquals = get_all_actual_clauses(rel->baserestrictinfo);
117 childquals = (List *) adjust_appendrel_attrs((Node *) childquals,
119 childqual = eval_const_expressions(root, (Node *)
120 make_ands_explicit(childquals));
121 if (childqual && IsA(childqual, Const) &&
122 (((Const *) childqual)->constisnull ||
123 !DatumGetBool(((Const *) childqual)->constvalue)))
126 * Restriction reduces to constant FALSE or constant NULL after
127 * substitution, so this child need not be scanned.
129 set_dummy_rel_pathlist(childrel);
132 childquals = make_ands_implicit((Expr *) childqual);
133 childquals = make_restrictinfos_from_actual_clauses(root,
135 childrel->baserestrictinfo = childquals;
137 if (relation_excluded_by_constraints(root, childrel, childRTE))
140 * This child need not be scanned, so we can omit it from the
141 * appendrel. Mark it with a dummy cheapest-path though, in case
142 * best_appendrel_indexscan() looks at it later.
144 set_dummy_rel_pathlist(childrel);
149 * CE failed, so finish copying/modifying targetlist and join quals.
151 * Note: the resulting childrel->reltargetlist may contain arbitrary
152 * expressions, which normally would not occur in a reltargetlist.
153 * That is okay because nothing outside of this routine will look at
154 * the child rel's reltargetlist. We do have to cope with the case
155 * while constructing attr_widths estimates below, though.
157 childrel->joininfo = (List *)
158 adjust_appendrel_attrs((Node *) rel->joininfo,
160 childrel->reltargetlist = (List *)
161 adjust_appendrel_attrs((Node *) rel->reltargetlist,
165 * We have to make child entries in the EquivalenceClass data
166 * structures as well. This is needed either if the parent
167 * participates in some eclass joins (because we will want to consider
168 * inner-indexscan joins on the individual children) or if the parent
169 * has useful pathkeys (because we should try to build MergeAppend
170 * paths that produce those sort orderings).
172 if (rel->has_eclass_joins || has_useful_pathkeys(root, rel))
173 add_child_rel_equivalences(root, appinfo, rel, childrel);
174 childrel->has_eclass_joins = rel->has_eclass_joins;
177 * Note: we could compute appropriate attr_needed data for the child's
178 * variables, by transforming the parent's attr_needed through the
179 * translated_vars mapping. However, currently there's no need
180 * because attr_needed is only examined for base relations not
181 * otherrels. So we just leave the child's attr_needed empty.
184 /* Remember which childrels are live, for MergeAppend logic below */
185 live_childrels = lappend(live_childrels, childrel);
188 * Compute the child's access paths, and add the cheapest one to the
189 * Append path we are constructing for the parent.
191 set_rel_pathlist(root, childrel, childRTindex, childRTE);
193 subpaths = accumulate_append_subpath(subpaths,
194 childrel->cheapest_total_path);
197 * Collect a list of all the available path orderings for all the
198 * children. We use this as a heuristic to indicate which sort
199 * orderings we should build MergeAppend paths for.
201 foreach(lcp, childrel->pathlist)
203 Path *childpath = (Path *) lfirst(lcp);
204 List *childkeys = childpath->pathkeys;
208 /* Ignore unsorted paths */
209 if (childkeys == NIL)
212 /* Have we already seen this ordering? */
213 foreach(lpk, all_child_pathkeys)
215 List *existing_pathkeys = (List *) lfirst(lpk);
217 if (compare_pathkeys(existing_pathkeys,
218 childkeys) == PATHKEYS_EQUAL)
226 /* No, so add it to all_child_pathkeys */
227 all_child_pathkeys = lappend(all_child_pathkeys, childkeys);
232 * Accumulate size information from each child.
234 if (childrel->rows > 0)
236 parent_rows += childrel->rows;
237 parent_size += childrel->width * childrel->rows;
240 * Accumulate per-column estimates too. We need not do anything
241 * for PlaceHolderVars in the parent list. If child expression
242 * isn't a Var, or we didn't record a width estimate for it, we
243 * have to fall back on a datatype-based estimate.
245 * By construction, child's reltargetlist is 1-to-1 with parent's.
247 forboth(parentvars, rel->reltargetlist,
248 childvars, childrel->reltargetlist)
250 Var *parentvar = (Var *) lfirst(parentvars);
251 Node *childvar = (Node *) lfirst(childvars);
253 if (IsA(parentvar, Var))
255 int pndx = parentvar->varattno - rel->min_attr;
256 int32 child_width = 0;
258 if (IsA(childvar, Var))
260 int cndx = ((Var *) childvar)->varattno - childrel->min_attr;
262 child_width = childrel->attr_widths[cndx];
264 if (child_width <= 0)
265 child_width = get_typavgwidth(exprType(childvar),
266 exprTypmod(childvar));
267 Assert(child_width > 0);
268 parent_attrsizes[pndx] += child_width * childrel->rows;
275 * Save the finished size estimates.
277 rel->rows = parent_rows;
282 rel->width = rint(parent_size / parent_rows);
283 for (i = 0; i < nattrs; i++)
284 rel->attr_widths[i] = rint(parent_attrsizes[i] / parent_rows);
287 rel->width = 0; /* attr_widths should be zero already */
290 * Set "raw tuples" count equal to "rows" for the appendrel; needed
291 * because some places assume rel->tuples is valid for any baserel.
293 rel->tuples = parent_rows;
295 pfree(parent_attrsizes);
298 * Next, build an unordered Append path for the rel. (Note: this is
299 * correct even if we have zero or one live subpath due to constraint
302 add_path(rel, (Path *) create_append_path(rel, subpaths));
305 * Next, build MergeAppend paths based on the collected list of child
306 * pathkeys. We consider both cheapest-startup and cheapest-total cases,
307 * ie, for each interesting ordering, collect all the cheapest startup
308 * subpaths and all the cheapest total paths, and build a MergeAppend path
311 foreach(l, all_child_pathkeys)
313 List *pathkeys = (List *) lfirst(l);
314 List *startup_subpaths = NIL;
315 List *total_subpaths = NIL;
316 bool startup_neq_total = false;
319 /* Select the child paths for this ordering... */
320 foreach(lcr, live_childrels)
322 RelOptInfo *childrel = (RelOptInfo *) lfirst(lcr);
323 Path *cheapest_startup,
326 /* Locate the right paths, if they are available. */
328 get_cheapest_path_for_pathkeys(childrel->pathlist,
332 get_cheapest_path_for_pathkeys(childrel->pathlist,
337 * If we can't find any paths with the right order just add the
338 * cheapest-total path; we'll have to sort it.
340 if (cheapest_startup == NULL)
341 cheapest_startup = childrel->cheapest_total_path;
342 if (cheapest_total == NULL)
343 cheapest_total = childrel->cheapest_total_path;
346 * Notice whether we actually have different paths for the
347 * "cheapest" and "total" cases; frequently there will be no point
348 * in two create_merge_append_path() calls.
350 if (cheapest_startup != cheapest_total)
351 startup_neq_total = true;
/*
 * NOTE(review): accumulate_append_subpath returns the extended list, yet
 * the two calls below appear to discard the result -- the assignment
 * left-hand sides (original lines 353/355) were presumably lost in this
 * excerpt.  Verify against upstream before relying on this text.
 */
354 accumulate_append_subpath(startup_subpaths, cheapest_startup);
356 accumulate_append_subpath(total_subpaths, cheapest_total);
359 /* ... and build the MergeAppend paths */
360 add_path(rel, (Path *) create_merge_append_path(root,
364 if (startup_neq_total)
365 add_path(rel, (Path *) create_merge_append_path(root,
371 /* Select cheapest path */
/*
 * NOTE(review): copied from PostgreSQL allpaths.c; excerpt is truncated
 * (braces and blank lines dropped).  Returns the extended subpath list;
 * callers must use the return value since lappend/list_concat may return
 * a new list head.
 */
376 * accumulate_append_subpath
377 * Add a subpath to the list being built for an Append or MergeAppend
379 * It's possible that the child is itself an Append path, in which case
380 * we can "cut out the middleman" and just add its child paths to our
381 * own list. (We don't try to do this earlier because we need to
382 * apply both levels of transformation to the quals.)
385 accumulate_append_subpath(List *subpaths, Path *path)
387 if (IsA(path, AppendPath))
389 AppendPath *apath = (AppendPath *) path;
391 /* list_copy is important here to avoid sharing list substructure */
392 return list_concat(subpaths, list_copy(apath->subpaths));
395 return lappend(subpaths, path);
/*
 * NOTE(review): copied from PostgreSQL allpaths.c; truncated excerpt.  The
 * statements that set the dummy size estimates and call set_cheapest()
 * (referenced by the surviving comments) are among the dropped lines.
 */
399 * set_dummy_rel_pathlist
400 * Build a dummy path for a relation that's been excluded by constraints
402 * Rather than inventing a special "dummy" path type, we represent this as an
403 * AppendPath with no members (see also IS_DUMMY_PATH macro).
406 set_dummy_rel_pathlist(RelOptInfo *rel)
408 /* Set dummy size estimates --- we leave attr_widths[] as zeroes */
412 add_path(rel, (Path *) create_append_path(rel, NIL));
414 /* Select cheapest path (pretty easy in this case...) */
/*
 * NOTE(review): copied from PostgreSQL allpaths.c; truncated excerpt.
 * Dynamic-programming join search: builds joinrels level by level in
 * root->join_rel_level[], then returns the single top-level rel.  The
 * set_cheapest() call and the final "return rel;" visible in upstream are
 * among the dropped lines here.
 */
419 * standard_join_search
420 * Find possible joinpaths for a query by successively finding ways
421 * to join component relations into join relations.
423 * 'levels_needed' is the number of iterations needed, ie, the number of
424 * independent jointree items in the query. This is > 1.
426 * 'initial_rels' is a list of RelOptInfo nodes for each independent
427 * jointree item. These are the components to be joined together.
428 * Note that levels_needed == list_length(initial_rels).
430 * Returns the final level of join relations, i.e., the relation that is
431 * the result of joining all the original relations together.
432 * At least one implementation path must be provided for this relation and
433 * all required sub-relations.
435 * To support loadable plugins that modify planner behavior by changing the
436 * join searching algorithm, we provide a hook variable that lets a plugin
437 * replace or supplement this function. Any such hook must return the same
438 * final join relation as the standard code would, but it might have a
439 * different set of implementation paths attached, and only the sub-joinrels
440 * needed for these paths need have been instantiated.
442 * Note to plugin authors: the functions invoked during standard_join_search()
443 * modify root->join_rel_list and root->join_rel_hash. If you want to do more
444 * than one join-order search, you'll probably need to save and restore the
445 * original states of those data structures. See geqo_eval() for an example.
448 standard_join_search(PlannerInfo *root, int levels_needed, List *initial_rels)
454 * This function cannot be invoked recursively within any one planning
455 * problem, so join_rel_level[] can't be in use already.
457 Assert(root->join_rel_level == NULL);
460 * We employ a simple "dynamic programming" algorithm: we first find all
461 * ways to build joins of two jointree items, then all ways to build joins
462 * of three items (from two-item joins and single items), then four-item
463 * joins, and so on until we have considered all ways to join all the
464 * items into one rel.
466 * root->join_rel_level[j] is a list of all the j-item rels. Initially we
467 * set root->join_rel_level[1] to represent all the single-jointree-item
/* Slot 0 is unused; levels run 1..levels_needed, hence the +1. */
470 root->join_rel_level = (List **) palloc0((levels_needed + 1) * sizeof(List *));
472 root->join_rel_level[1] = initial_rels;
474 for (lev = 2; lev <= levels_needed; lev++)
479 * Determine all possible pairs of relations to be joined at this
480 * level, and build paths for making each one from every available
481 * pair of lower-level relations.
483 join_search_one_level(root, lev);
486 * Do cleanup work on each just-processed rel.
488 foreach(lc, root->join_rel_level[lev])
490 rel = (RelOptInfo *) lfirst(lc);
492 /* Find and save the cheapest paths for this rel */
495 #ifdef OPTIMIZER_DEBUG
496 debug_print_rel(root, rel);
502 * We should have a single rel at the final level.
504 if (root->join_rel_level[levels_needed] == NIL)
505 elog(ERROR, "failed to build any %d-way joins", levels_needed);
506 Assert(list_length(root->join_rel_level[levels_needed]) == 1);
508 rel = (RelOptInfo *) linitial(root->join_rel_level[levels_needed]);
510 root->join_rel_level = NULL;
/*
 * NOTE(review): copied from PostgreSQL joinrels.c; truncated excerpt (the
 * if/else structure around the clause-join vs. clauseless-join branches,
 * and the bushy-plan "for (k = 2; ...)" header, are among the dropped
 * lines).  Three phases survive in view: left/right-sided joins, bushy
 * joins, and the last-ditch cartesian-product fallback.
 */
516 * join_search_one_level
517 * Consider ways to produce join relations containing exactly 'level'
518 * jointree items. (This is one step of the dynamic-programming method
519 * embodied in standard_join_search.) Join rel nodes for each feasible
520 * combination of lower-level rels are created and returned in a list.
521 * Implementation paths are created for each such joinrel, too.
523 * level: level of rels we want to make this time
524 * root->join_rel_level[j], 1 <= j < level, is a list of rels containing j items
526 * The result is returned in root->join_rel_level[level].
529 join_search_one_level(PlannerInfo *root, int level)
531 List **joinrels = root->join_rel_level;
535 Assert(joinrels[level] == NIL);
537 /* Set join_cur_level so that new joinrels are added to proper list */
538 root->join_cur_level = level;
541 * First, consider left-sided and right-sided plans, in which rels of
542 * exactly level-1 member relations are joined against initial relations.
543 * We prefer to join using join clauses, but if we find a rel of level-1
544 * members that has no join clauses, we will generate Cartesian-product
545 * joins against all initial rels not already contained in it.
547 * In the first pass (level == 2), we try to join each initial rel to each
548 * initial rel that appears later in joinrels[1]. (The mirror-image joins
549 * are handled automatically by make_join_rel.) In later passes, we try
550 * to join rels of size level-1 from joinrels[level-1] to each initial rel
553 foreach(r, joinrels[level - 1])
555 RelOptInfo *old_rel = (RelOptInfo *) lfirst(r);
556 ListCell *other_rels;
559 other_rels = lnext(r); /* only consider remaining initial
562 other_rels = list_head(joinrels[1]); /* consider all initial
565 if (old_rel->joininfo != NIL || old_rel->has_eclass_joins ||
566 has_join_restriction(root, old_rel))
569 * Note that if all available join clauses for this rel require
570 * more than one other rel, we will fail to make any joins against
571 * it here. In most cases that's OK; it'll be considered by
572 * "bushy plan" join code in a higher-level pass where we have
573 * those other rels collected into a join rel.
575 * See also the last-ditch case below.
577 make_rels_by_clause_joins(root,
584 * Oops, we have a relation that is not joined to any other
585 * relation, either directly or by join-order restrictions.
586 * Cartesian product time.
588 make_rels_by_clauseless_joins(root,
595 * Now, consider "bushy plans" in which relations of k initial rels are
596 * joined to relations of level-k initial rels, for 2 <= k <= level-2.
598 * We only consider bushy-plan joins for pairs of rels where there is a
599 * suitable join clause (or join order restriction), in order to avoid
600 * unreasonable growth of planning time.
604 int other_level = level - k;
607 * Since make_join_rel(x, y) handles both x,y and y,x cases, we only
608 * need to go as far as the halfway point.
613 foreach(r, joinrels[k])
615 RelOptInfo *old_rel = (RelOptInfo *) lfirst(r);
616 ListCell *other_rels;
620 * We can ignore clauseless joins here, *except* when they
621 * participate in join-order restrictions --- then we might have
622 * to force a bushy join plan.
624 if (old_rel->joininfo == NIL && !old_rel->has_eclass_joins &&
625 !has_join_restriction(root, old_rel))
628 if (k == other_level)
629 other_rels = lnext(r); /* only consider remaining rels */
631 other_rels = list_head(joinrels[other_level]);
633 for_each_cell(r2, other_rels)
635 RelOptInfo *new_rel = (RelOptInfo *) lfirst(r2);
637 if (!bms_overlap(old_rel->relids, new_rel->relids))
640 * OK, we can build a rel of the right level from this
641 * pair of rels. Do so if there is at least one usable
642 * join clause or a relevant join restriction.
644 if (have_relevant_joinclause(root, old_rel, new_rel) ||
645 have_join_order_restriction(root, old_rel, new_rel))
647 (void) make_join_rel(root, old_rel, new_rel);
655 * Last-ditch effort: if we failed to find any usable joins so far, force
656 * a set of cartesian-product joins to be generated. This handles the
657 * special case where all the available rels have join clauses but we
658 * cannot use any of those clauses yet. An example is
660 * SELECT * FROM a,b,c WHERE (a.f1 + b.f2 + c.f3) = 0;
662 * The join clause will be usable at level 3, but at level 2 we have no
663 * choice but to make cartesian joins. We consider only left-sided and
664 * right-sided cartesian joins in this case (no bushy).
666 if (joinrels[level] == NIL)
669 * This loop is just like the first one, except we always call
670 * make_rels_by_clauseless_joins().
672 foreach(r, joinrels[level - 1])
674 RelOptInfo *old_rel = (RelOptInfo *) lfirst(r);
675 ListCell *other_rels;
678 other_rels = lnext(r); /* only consider remaining initial
681 other_rels = list_head(joinrels[1]); /* consider all initial
684 make_rels_by_clauseless_joins(root,
690 * When special joins are involved, there may be no legal way
691 * to make an N-way join for some values of N. For example consider
693 * SELECT ... FROM t1 WHERE
694 * x IN (SELECT ... FROM t2,t3 WHERE ...) AND
695 * y IN (SELECT ... FROM t4,t5 WHERE ...)
697 * We will flatten this query to a 5-way join problem, but there are
698 * no 4-way joins that join_is_legal() will consider legal. We have
699 * to accept failure at level 4 and go on to discover a workable
700 * bushy plan at level 5.
702 * However, if there are no special joins then join_is_legal() should
703 * never fail, and so the following sanity check is useful.
706 if (joinrels[level] == NIL && root->join_info_list == NIL)
707 elog(ERROR, "failed to build any %d-way joins", level);
/*
 * NOTE(review): copied from PostgreSQL joinrels.c; truncated excerpt
 * (function braces and the old_rel parameter line are among the dropped
 * lines).  Joins old_rel to each non-overlapping other_rel that shares a
 * join clause or join-order restriction with it.
 */
712 * make_rels_by_clause_joins
713 * Build joins between the given relation 'old_rel' and other relations
714 * that participate in join clauses that 'old_rel' also participates in
715 * (or participate in join-order restrictions with it).
716 * The join rels are returned in root->join_rel_level[join_cur_level].
718 * Note: at levels above 2 we will generate the same joined relation in
719 * multiple ways --- for example (a join b) join c is the same RelOptInfo as
720 * (b join c) join a, though the second case will add a different set of Paths
721 * to it. This is the reason for using the join_rel_level mechanism, which
722 * automatically ensures that each new joinrel is only added to the list once.
724 * 'old_rel' is the relation entry for the relation to be joined
725 * 'other_rels': the first cell in a linked list containing the other
726 * rels to be considered for joining
728 * Currently, this is only used with initial rels in other_rels, but it
729 * will work for joining to joinrels too.
732 make_rels_by_clause_joins(PlannerInfo *root,
734 ListCell *other_rels)
738 for_each_cell(l, other_rels)
740 RelOptInfo *other_rel = (RelOptInfo *) lfirst(l);
742 if (!bms_overlap(old_rel->relids, other_rel->relids) &&
743 (have_relevant_joinclause(root, old_rel, other_rel) ||
744 have_join_order_restriction(root, old_rel, other_rel)))
746 (void) make_join_rel(root, old_rel, other_rel);
/*
 * NOTE(review): copied from PostgreSQL joinrels.c; truncated excerpt.
 * Cartesian-product counterpart of make_rels_by_clause_joins: the only
 * filter applied is that the relid sets must not overlap.
 */
752 * make_rels_by_clauseless_joins
753 * Given a relation 'old_rel' and a list of other relations
754 * 'other_rels', create a join relation between 'old_rel' and each
755 * member of 'other_rels' that isn't already included in 'old_rel'.
756 * The join rels are returned in root->join_rel_level[join_cur_level].
758 * 'old_rel' is the relation entry for the relation to be joined
759 * 'other_rels': the first cell of a linked list containing the
760 * other rels to be considered for joining
762 * Currently, this is only used with initial rels in other_rels, but it would
763 * work for joining to joinrels too.
766 make_rels_by_clauseless_joins(PlannerInfo *root,
768 ListCell *other_rels)
772 for_each_cell(l, other_rels)
774 RelOptInfo *other_rel = (RelOptInfo *) lfirst(l);
776 if (!bms_overlap(other_rel->relids, old_rel->relids))
778 (void) make_join_rel(root, old_rel, other_rel);
/*
 * NOTE(review): copied from PostgreSQL joinrels.c; truncated excerpt.  The
 * control structure is partially lost (continue statements, unique_ified
 * assignments, reversed flag updates, and the final "return true;" are among
 * the dropped lines), so read this only as an annotated listing.
 */
785 * Determine whether a proposed join is legal given the query's
786 * join order constraints; and if it is, determine the join type.
788 * Caller must supply not only the two rels, but the union of their relids.
789 * (We could simplify the API by computing joinrelids locally, but this
790 * would be redundant work in the normal path through make_join_rel.)
792 * On success, *sjinfo_p is set to NULL if this is to be a plain inner join,
793 * else it's set to point to the associated SpecialJoinInfo node. Also,
794 * *reversed_p is set TRUE if the given relations need to be swapped to
795 * match the SpecialJoinInfo node.
798 join_is_legal(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2,
800 SpecialJoinInfo **sjinfo_p, bool *reversed_p)
802 SpecialJoinInfo *match_sjinfo;
809 * Ensure output params are set on failure return. This is just to
810 * suppress uninitialized-variable warnings from overly anal compilers.
816 * If we have any special joins, the proposed join might be illegal; and
817 * in any case we have to determine its join type. Scan the join info
818 * list for conflicts.
822 unique_ified = false;
823 is_valid_inner = true;
825 foreach(l, root->join_info_list)
827 SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) lfirst(l);
830 * This special join is not relevant unless its RHS overlaps the
831 * proposed join. (Check this first as a fast path for dismissing
832 * most irrelevant SJs quickly.)
834 if (!bms_overlap(sjinfo->min_righthand, joinrelids))
838 * Also, not relevant if proposed join is fully contained within RHS
839 * (ie, we're still building up the RHS).
841 if (bms_is_subset(joinrelids, sjinfo->min_righthand))
845 * Also, not relevant if SJ is already done within either input.
847 if (bms_is_subset(sjinfo->min_lefthand, rel1->relids) &&
848 bms_is_subset(sjinfo->min_righthand, rel1->relids))
850 if (bms_is_subset(sjinfo->min_lefthand, rel2->relids) &&
851 bms_is_subset(sjinfo->min_righthand, rel2->relids))
855 * If it's a semijoin and we already joined the RHS to any other rels
856 * within either input, then we must have unique-ified the RHS at that
857 * point (see below). Therefore the semijoin is no longer relevant in
860 if (sjinfo->jointype == JOIN_SEMI)
862 if (bms_is_subset(sjinfo->syn_righthand, rel1->relids) &&
863 !bms_equal(sjinfo->syn_righthand, rel1->relids))
865 if (bms_is_subset(sjinfo->syn_righthand, rel2->relids) &&
866 !bms_equal(sjinfo->syn_righthand, rel2->relids))
871 * If one input contains min_lefthand and the other contains
872 * min_righthand, then we can perform the SJ at this join.
874 * Barf if we get matches to more than one SJ (is that possible?)
876 if (bms_is_subset(sjinfo->min_lefthand, rel1->relids) &&
877 bms_is_subset(sjinfo->min_righthand, rel2->relids))
880 return false; /* invalid join path */
881 match_sjinfo = sjinfo;
884 else if (bms_is_subset(sjinfo->min_lefthand, rel2->relids) &&
885 bms_is_subset(sjinfo->min_righthand, rel1->relids))
888 return false; /* invalid join path */
889 match_sjinfo = sjinfo;
892 else if (sjinfo->jointype == JOIN_SEMI &&
893 bms_equal(sjinfo->syn_righthand, rel2->relids) &&
894 create_unique_path(root, rel2, rel2->cheapest_total_path,
898 * For a semijoin, we can join the RHS to anything else by
899 * unique-ifying the RHS (if the RHS can be unique-ified).
900 * We will only get here if we have the full RHS but less
901 * than min_lefthand on the LHS.
903 * The reason to consider such a join path is exemplified by
904 * SELECT ... FROM a,b WHERE (a.x,b.y) IN (SELECT c1,c2 FROM c)
905 * If we insist on doing this as a semijoin we will first have
906 * to form the cartesian product of A*B. But if we unique-ify
907 * C then the semijoin becomes a plain innerjoin and we can join
908 * in any order, eg C to A and then to B. When C is much smaller
909 * than A and B this can be a huge win. So we allow C to be
910 * joined to just A or just B here, and then make_join_rel has
911 * to handle the case properly.
913 * Note that actually we'll allow unique-ified C to be joined to
914 * some other relation D here, too. That is legal, if usually not
915 * very sane, and this routine is only concerned with legality not
916 * with whether the join is good strategy.
920 return false; /* invalid join path */
921 match_sjinfo = sjinfo;
925 else if (sjinfo->jointype == JOIN_SEMI &&
926 bms_equal(sjinfo->syn_righthand, rel1->relids) &&
927 create_unique_path(root, rel1, rel1->cheapest_total_path,
930 /* Reversed semijoin case */
932 return false; /* invalid join path */
933 match_sjinfo = sjinfo;
940 * Otherwise, the proposed join overlaps the RHS but isn't
941 * a valid implementation of this SJ. It might still be
942 * a legal join, however. If both inputs overlap the RHS,
943 * assume that it's OK. Since the inputs presumably got past
944 * this function's checks previously, they can't overlap the
945 * LHS and their violations of the RHS boundary must represent
946 * SJs that have been determined to commute with this one.
947 * We have to allow this to work correctly in cases like
948 * (a LEFT JOIN (b JOIN (c LEFT JOIN d)))
949 * when the c/d join has been determined to commute with the join
950 * to a, and hence d is not part of min_righthand for the upper
951 * join. It should be legal to join b to c/d but this will appear
952 * as a violation of the upper join's RHS.
953 * Furthermore, if one input overlaps the RHS and the other does
954 * not, we should still allow the join if it is a valid
955 * implementation of some other SJ. We have to allow this to
956 * support the associative identity
957 * (a LJ b on Pab) LJ c ON Pbc = a LJ (b LJ c ON Pbc) on Pab
958 * since joining B directly to C violates the lower SJ's RHS.
959 * We assume that make_outerjoininfo() set things up correctly
960 * so that we'll only match to some SJ if the join is valid.
961 * Set flag here to check at bottom of loop.
964 if (sjinfo->jointype != JOIN_SEMI &&
965 bms_overlap(rel1->relids, sjinfo->min_righthand) &&
966 bms_overlap(rel2->relids, sjinfo->min_righthand))
969 Assert(!bms_overlap(joinrelids, sjinfo->min_lefthand));
972 is_valid_inner = false;
977 * Fail if violated some SJ's RHS and didn't match to another SJ. However,
978 * "matching" to a semijoin we are implementing by unique-ification
979 * doesn't count (think: it's really an inner join).
981 if (!is_valid_inner &&
982 (match_sjinfo == NULL || unique_ified))
983 return false; /* invalid join path */
985 /* Otherwise, it's a valid join */
986 *sjinfo_p = match_sjinfo;
987 *reversed_p = reversed;
/*
 * NOTE(review): copied from PostgreSQL joinrels.c; truncated excerpt (the
 * return statements of this boolean predicate are among the dropped lines).
 */
992 * has_join_restriction
993 * Detect whether the specified relation has join-order restrictions
994 * due to being inside an outer join or an IN (sub-SELECT).
996 * Essentially, this tests whether have_join_order_restriction() could
997 * succeed with this rel and some other one. It's OK if we sometimes
998 * say "true" incorrectly. (Therefore, we don't bother with the relatively
999 * expensive has_legal_joinclause test.)
1002 has_join_restriction(PlannerInfo *root, RelOptInfo *rel)
1006 foreach(l, root->join_info_list)
1008 SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) lfirst(l);
1010 /* ignore full joins --- other mechanisms preserve their ordering */
1011 if (sjinfo->jointype == JOIN_FULL)
1014 /* ignore if SJ is already contained in rel */
1015 if (bms_is_subset(sjinfo->min_lefthand, rel->relids) &&
1016 bms_is_subset(sjinfo->min_righthand, rel->relids))
1019 /* restricted if it overlaps LHS or RHS, but doesn't contain SJ */
1020 if (bms_overlap(sjinfo->min_lefthand, rel->relids) ||
1021 bms_overlap(sjinfo->min_righthand, rel->relids))
/*
 * NOTE(review): copied from PostgreSQL joinrels.c; truncated excerpt.
 * A rel is "dummy" (proven empty) iff its cheapest path is a childless
 * AppendPath, as tested by the IS_DUMMY_PATH macro.
 */
1029 * is_dummy_rel --- has relation been proven empty?
1031 * If so, it will have a single path that is dummy.
1034 is_dummy_rel(RelOptInfo *rel)
1036 return (rel->cheapest_total_path != NULL &&
1037 IS_DUMMY_PATH(rel->cheapest_total_path));
/*
 * NOTE(review): copied from PostgreSQL joinrels.c; truncated excerpt (early
 * return, size-estimate assignments and set_cheapest() call referenced by
 * the surviving comments are among the dropped lines).  The memory-context
 * switch keeps the dummy path in the same context as the RelOptInfo so it
 * survives (or is discarded with) that rel during GEQO planning.
 */
1041 * Mark a relation as proven empty.
1043 * During GEQO planning, this can get invoked more than once on the same
1044 * baserel struct, so it's worth checking to see if the rel is already marked
1047 * Also, when called during GEQO join planning, we are in a short-lived
1048 * memory context. We must make sure that the dummy path attached to a
1049 * baserel survives the GEQO cycle, else the baserel is trashed for future
1050 * GEQO cycles. On the other hand, when we are marking a joinrel during GEQO,
1051 * we don't want the dummy path to clutter the main planning context. Upshot
1052 * is that the best solution is to explicitly make the dummy path in the same
1053 * context the given RelOptInfo is in.
1056 mark_dummy_rel(RelOptInfo *rel)
1058 MemoryContext oldcontext;
1060 /* Already marked? */
1061 if (is_dummy_rel(rel))
1064 /* No, so choose correct context to make the dummy path in */
1065 oldcontext = MemoryContextSwitchTo(GetMemoryChunkContext(rel));
1067 /* Set dummy size estimate */
1070 /* Evict any previously chosen paths */
1071 rel->pathlist = NIL;
1073 /* Set up the dummy path */
1074 add_path(rel, (Path *) create_append_path(rel, NIL));
1076 /* Set or update cheapest_total_path */
1079 MemoryContextSwitchTo(oldcontext);
1083 * restriction_is_constant_false --- is a restrictlist just FALSE?
1085 * In cases where a qual is provably constant FALSE, eval_const_expressions
1086 * will generally have thrown away anything that's ANDed with it. In outer
1087 * join situations this will leave us computing cartesian products only to
1088 * decide there's no match for an outer row, which is pretty stupid. So,
1089 * we need to detect the case.
1091 * If only_pushed_down is TRUE, then consider only pushed-down quals.
1094 restriction_is_constant_false(List *restrictlist, bool only_pushed_down)
1099 * Despite the above comment, the restriction list we see here might
1100 * possibly have other members besides the FALSE constant, since other
1101 * quals could get "pushed down" to the outer join level. So we check
1102 * each member of the list.
1104 foreach(lc, restrictlist)
1106 RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
1108 Assert(IsA(rinfo, RestrictInfo));
1109 if (only_pushed_down && !rinfo->is_pushed_down)
1112 if (rinfo->clause && IsA(rinfo->clause, Const))
1114 Const *con = (Const *) rinfo->clause;
1116 /* constant NULL is as good as constant FALSE for our purposes */
1117 if (con->constisnull)
1119 if (!DatumGetBool(con->constvalue))