1 /*-------------------------------------------------------------------------
4 * Routines copied from PostgreSQL core distribution.
6 * src/backend/optimizer/path/allpaths.c
7 * set_append_rel_pathlist()
8 * generate_mergeappend_paths()
9 * get_cheapest_parameterized_child_path()
10 * accumulate_append_subpath()
11 * standard_join_search()
13 * src/backend/optimizer/path/joinrels.c
14 * join_search_one_level()
15 * make_rels_by_clause_joins()
16 * make_rels_by_clauseless_joins()
18 * has_join_restriction()
21 * restriction_is_constant_false()
23 * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
24 * Portions Copyright (c) 1994, Regents of the University of California
26 *-------------------------------------------------------------------------
30 * set_append_rel_pathlist
31 * Build access paths for an "append relation"
/*
 * NOTE(review): this file is an elided, line-numbered listing of routines
 * copied from PostgreSQL core; brace, declaration, and some statement
 * lines are missing from view, so the code is not compilable as shown.
 * Comments below describe only what the visible lines establish.
 *
 * Builds Append/MergeAppend paths for parent rel 'rel' (RT index 'rti')
 * from the paths of its append-rel children.
 */
34 set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
35 Index rti, RangeTblEntry *rte)
37 int parentRTindex = rti;
38 List *live_childrels = NIL;
40 List *all_child_pathkeys = NIL;
41 List *all_child_outers = NIL;
45 * Generate access paths for each member relation, and remember the
46 * cheapest path for each one. Also, identify all pathkeys (orderings)
47 * and parameterizations (required_outer sets) available for the member
/* Pass over append_rel_list: build paths for each child of this parent. */
50 foreach(l, root->append_rel_list)
52 AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(l);
54 RangeTblEntry *childRTE;
58 /* append_rel_list contains all append rels; ignore others */
59 if (appinfo->parent_relid != parentRTindex)
62 /* Re-locate the child RTE and RelOptInfo */
63 childRTindex = appinfo->child_relid;
64 childRTE = root->simple_rte_array[childRTindex];
65 childrel = root->simple_rel_array[childRTindex];
68 * Compute the child's access paths.
70 set_rel_pathlist(root, childrel, childRTindex, childRTE);
73 * If child is dummy, ignore it.
75 if (IS_DUMMY_REL(childrel))
79 * Child is live, so add its cheapest access path to the Append path
80 * we are constructing for the parent.
82 subpaths = accumulate_append_subpath(subpaths,
83 childrel->cheapest_total_path);
85 /* Remember which childrels are live, for logic below */
86 live_childrels = lappend(live_childrels, childrel);
89 * Collect lists of all the available path orderings and
90 * parameterizations for all the children. We use these as a
91 * heuristic to indicate which sort orderings and parameterizations we
92 * should build Append and MergeAppend paths for.
94 foreach(lcp, childrel->pathlist)
96 Path *childpath = (Path *) lfirst(lcp);
97 List *childkeys = childpath->pathkeys;
98 Relids childouter = PATH_REQ_OUTER(childpath);
100 /* Unsorted paths don't contribute to pathkey list */
101 if (childkeys != NIL)
106 /* Have we already seen this ordering? */
107 foreach(lpk, all_child_pathkeys)
109 List *existing_pathkeys = (List *) lfirst(lpk);
111 if (compare_pathkeys(existing_pathkeys,
112 childkeys) == PATHKEYS_EQUAL)
120 /* No, so add it to all_child_pathkeys */
121 all_child_pathkeys = lappend(all_child_pathkeys,
126 /* Unparameterized paths don't contribute to param-set list */
132 /* Have we already seen this param set? */
133 foreach(lco, all_child_outers)
135 Relids existing_outers = (Relids) lfirst(lco);
137 if (bms_equal(existing_outers, childouter))
145 /* No, so add it to all_child_outers */
146 all_child_outers = lappend(all_child_outers,
154 * Next, build an unordered, unparameterized Append path for the rel.
155 * (Note: this is correct even if we have zero or one live subpath due to
156 * constraint exclusion.)
158 add_path(rel, (Path *) create_append_path(rel, subpaths, NULL));
161 * Build unparameterized MergeAppend paths based on the collected list of
164 generate_mergeappend_paths(root, rel, live_childrels, all_child_pathkeys);
167 * Build Append paths for each parameterization seen among the child rels.
168 * (This may look pretty expensive, but in most cases of practical
169 * interest, the child rels will expose mostly the same parameterizations,
170 * so that not that many cases actually get considered here.)
172 * The Append node itself cannot enforce quals, so all qual checking must
173 * be done in the child paths. This means that to have a parameterized
174 * Append path, we must have the exact same parameterization for each
175 * child path; otherwise some children might be failing to check the
176 * moved-down quals. To make them match up, we can try to increase the
177 * parameterization of lesser-parameterized paths.
179 foreach(l, all_child_outers)
181 Relids required_outer = (Relids) lfirst(l);
182 bool subpaths_valid = true;
185 /* Select the child paths for an Append with this parameterization */
187 foreach(lcr, live_childrels)
189 RelOptInfo *childrel = (RelOptInfo *) lfirst(lcr);
192 subpath = get_cheapest_parameterized_child_path(root,
197 /* failed to make a suitable path for this child */
198 subpaths_valid = false;
201 subpaths = accumulate_append_subpath(subpaths, subpath);
205 add_path(rel, (Path *)
206 create_append_path(rel, subpaths, required_outer));
209 /* Select cheapest paths */
/* NOTE(review): presumably a set_cheapest(rel) call follows here -- elided from this listing; confirm against upstream. */
214 * generate_mergeappend_paths
215 * Generate MergeAppend paths for an append relation
217 * Generate a path for each ordering (pathkey list) appearing in
218 * all_child_pathkeys.
220 * We consider both cheapest-startup and cheapest-total cases, ie, for each
221 * interesting ordering, collect all the cheapest startup subpaths and all the
222 * cheapest total paths, and build a MergeAppend path for each case.
224 * We don't currently generate any parameterized MergeAppend paths. While
225 * it would not take much more code here to do so, it's very unclear that it
226 * is worth the planning cycles to investigate such paths: there's little
227 * use for an ordered path on the inside of a nestloop. In fact, it's likely
228 * that the current coding of add_path would reject such paths out of hand,
229 * because add_path gives no credit for sort ordering of parameterized paths,
230 * and a parameterized MergeAppend is going to be more expensive than the
231 * corresponding parameterized Append path. If we ever try harder to support
232 * parameterized mergejoin plans, it might be worth adding support for
233 * parameterized MergeAppends to feed such joins. (See notes in
234 * optimizer/README for why that might not ever happen, though.)
/* NOTE(review): elided listing -- declarations/braces missing from view. */
237 generate_mergeappend_paths(PlannerInfo *root, RelOptInfo *rel,
238 List *live_childrels,
239 List *all_child_pathkeys)
/* One MergeAppend (or two: startup + total) per distinct child ordering. */
243 foreach(lcp, all_child_pathkeys)
245 List *pathkeys = (List *) lfirst(lcp);
246 List *startup_subpaths = NIL;
247 List *total_subpaths = NIL;
248 bool startup_neq_total = false;
251 /* Select the child paths for this ordering... */
252 foreach(lcr, live_childrels)
254 RelOptInfo *childrel = (RelOptInfo *) lfirst(lcr);
255 Path *cheapest_startup,
258 /* Locate the right paths, if they are available. */
260 get_cheapest_path_for_pathkeys(childrel->pathlist,
265 get_cheapest_path_for_pathkeys(childrel->pathlist,
271 * If we can't find any paths with the right order just use the
272 * cheapest-total path; we'll have to sort it later.
274 if (cheapest_startup == NULL || cheapest_total == NULL)
276 cheapest_startup = cheapest_total =
277 childrel->cheapest_total_path;
278 Assert(cheapest_total != NULL);
282 * Notice whether we actually have different paths for the
283 * "cheapest" and "total" cases; frequently there will be no point
284 * in two create_merge_append_path() calls.
286 if (cheapest_startup != cheapest_total)
287 startup_neq_total = true;
290 accumulate_append_subpath(startup_subpaths, cheapest_startup);
292 accumulate_append_subpath(total_subpaths, cheapest_total);
/* NOTE(review): the assignment targets of the two accumulate_append_subpath() calls above appear elided in this listing -- confirm against upstream. */
295 /* ... and build the MergeAppend paths */
296 add_path(rel, (Path *) create_merge_append_path(root,
301 if (startup_neq_total)
302 add_path(rel, (Path *) create_merge_append_path(root,
311 * get_cheapest_parameterized_child_path
312 * Get cheapest path for this relation that has exactly the requested
315 * Returns NULL if unable to create such a path.
/* NOTE(review): elided listing -- declarations/braces missing from view. */
318 get_cheapest_parameterized_child_path(PlannerInfo *root, RelOptInfo *rel,
319 Relids required_outer)
325 * Look up the cheapest existing path with no more than the needed
326 * parameterization. If it has exactly the needed parameterization, we're
329 cheapest = get_cheapest_path_for_pathkeys(rel->pathlist,
333 Assert(cheapest != NULL);
334 if (bms_equal(PATH_REQ_OUTER(cheapest), required_outer))
338 * Otherwise, we can "reparameterize" an existing path to match the given
339 * parameterization, which effectively means pushing down additional
340 * joinquals to be checked within the path's scan. However, some existing
341 * paths might check the available joinquals already while others don't;
342 * therefore, it's not clear which existing path will be cheapest after
343 * reparameterization. We have to go through them all and find out.
346 foreach(lc, rel->pathlist)
348 Path *path = (Path *) lfirst(lc);
350 /* Can't use it if it needs more than requested parameterization */
351 if (!bms_is_subset(PATH_REQ_OUTER(path), required_outer))
355 * Reparameterization can only increase the path's cost, so if it's
356 * already more expensive than the current cheapest, forget it.
358 if (cheapest != NULL &&
359 compare_path_costs(cheapest, path, TOTAL_COST) <= 0)
362 /* Reparameterize if needed, then recheck cost */
363 if (!bms_equal(PATH_REQ_OUTER(path), required_outer))
365 path = reparameterize_path(root, path, required_outer, 1.0);
367 continue; /* failed to reparameterize this one */
368 Assert(bms_equal(PATH_REQ_OUTER(path), required_outer));
370 if (cheapest != NULL &&
371 compare_path_costs(cheapest, path, TOTAL_COST) <= 0)
375 /* We have a new best path */
379 /* Return the best path, or NULL if we found no suitable candidate */
384 * accumulate_append_subpath
385 * Add a subpath to the list being built for an Append or MergeAppend
387 * It's possible that the child is itself an Append path, in which case
388 * we can "cut out the middleman" and just add its child paths to our
389 * own list. (We don't try to do this earlier because we need to
390 * apply both levels of transformation to the quals.)
/* Returns the (possibly reallocated) subpath list; callers must use the return value. */
393 accumulate_append_subpath(List *subpaths, Path *path)
395 if (IsA(path, AppendPath))
397 AppendPath *apath = (AppendPath *) path;
399 /* list_copy is important here to avoid sharing list substructure */
400 return list_concat(subpaths, list_copy(apath->subpaths))
403 return lappend(subpaths, path);
407 * standard_join_search
408 * Find possible joinpaths for a query by successively finding ways
409 * to join component relations into join relations.
411 * 'levels_needed' is the number of iterations needed, ie, the number of
412 * independent jointree items in the query. This is > 1.
414 * 'initial_rels' is a list of RelOptInfo nodes for each independent
415 * jointree item. These are the components to be joined together.
416 * Note that levels_needed == list_length(initial_rels).
418 * Returns the final level of join relations, i.e., the relation that is
419 * the result of joining all the original relations together.
420 * At least one implementation path must be provided for this relation and
421 * all required sub-relations.
423 * To support loadable plugins that modify planner behavior by changing the
424 * join searching algorithm, we provide a hook variable that lets a plugin
425 * replace or supplement this function. Any such hook must return the same
426 * final join relation as the standard code would, but it might have a
427 * different set of implementation paths attached, and only the sub-joinrels
428 * needed for these paths need have been instantiated.
430 * Note to plugin authors: the functions invoked during standard_join_search()
431 * modify root->join_rel_list and root->join_rel_hash. If you want to do more
432 * than one join-order search, you'll probably need to save and restore the
433 * original states of those data structures. See geqo_eval() for an example.
/* NOTE(review): elided listing -- declarations/braces missing from view. */
436 standard_join_search(PlannerInfo *root, int levels_needed, List *initial_rels)
442 * This function cannot be invoked recursively within any one planning
443 * problem, so join_rel_level[] can't be in use already.
445 Assert(root->join_rel_level == NULL);
448 * We employ a simple "dynamic programming" algorithm: we first find all
449 * ways to build joins of two jointree items, then all ways to build joins
450 * of three items (from two-item joins and single items), then four-item
451 * joins, and so on until we have considered all ways to join all the
452 * items into one rel.
454 * root->join_rel_level[j] is a list of all the j-item rels. Initially we
455 * set root->join_rel_level[1] to represent all the single-jointree-item
/* Slot 0 is unused; palloc0 zeroes all levels 2..levels_needed. */
458 root->join_rel_level = (List **) palloc0((levels_needed + 1) * sizeof(List *));
460 root->join_rel_level[1] = initial_rels;
462 for (lev = 2; lev <= levels_needed; lev++)
467 * Determine all possible pairs of relations to be joined at this
468 * level, and build paths for making each one from every available
469 * pair of lower-level relations.
471 join_search_one_level(root, lev);
474 * Do cleanup work on each just-processed rel.
476 foreach(lc, root->join_rel_level[lev])
478 rel = (RelOptInfo *) lfirst(lc);
480 /* Find and save the cheapest paths for this rel */
483 #ifdef OPTIMIZER_DEBUG
484 debug_print_rel(root, rel);
/* NOTE(review): the set_cheapest() call and matching #endif appear elided from this listing. */
490 * We should have a single rel at the final level.
492 if (root->join_rel_level[levels_needed] == NIL)
493 elog(ERROR, "failed to build any %d-way joins", levels_needed);
494 Assert(list_length(root->join_rel_level[levels_needed]) == 1);
496 rel = (RelOptInfo *) linitial(root->join_rel_level[levels_needed]);
/* Reset so a later planning problem can rebuild join_rel_level from scratch. */
498 root->join_rel_level = NULL;
504 * join_search_one_level
505 * Consider ways to produce join relations containing exactly 'level'
506 * jointree items. (This is one step of the dynamic-programming method
507 * embodied in standard_join_search.) Join rel nodes for each feasible
508 * combination of lower-level rels are created and returned in a list.
509 * Implementation paths are created for each such joinrel, too.
511 * level: level of rels we want to make this time
512 * root->join_rel_level[j], 1 <= j < level, is a list of rels containing j items
514 * The result is returned in root->join_rel_level[level].
/* NOTE(review): elided listing -- declarations/braces missing from view. */
517 join_search_one_level(PlannerInfo *root, int level)
519 List **joinrels = root->join_rel_level;
523 Assert(joinrels[level] == NIL);
525 /* Set join_cur_level so that new joinrels are added to proper list */
526 root->join_cur_level = level;
529 * First, consider left-sided and right-sided plans, in which rels of
530 * exactly level-1 member relations are joined against initial relations.
531 * We prefer to join using join clauses, but if we find a rel of level-1
532 * members that has no join clauses, we will generate Cartesian-product
533 * joins against all initial rels not already contained in it.
535 foreach(r, joinrels[level - 1])
537 RelOptInfo *old_rel = (RelOptInfo *) lfirst(r);
539 if (old_rel->joininfo != NIL || old_rel->has_eclass_joins ||
540 has_join_restriction(root, old_rel))
543 * There are join clauses or join order restrictions relevant to
544 * this rel, so consider joins between this rel and (only) those
545 * initial rels it is linked to by a clause or restriction.
547 * At level 2 this condition is symmetric, so there is no need to
548 * look at initial rels before this one in the list; we already
549 * considered such joins when we were at the earlier rel. (The
550 * mirror-image joins are handled automatically by make_join_rel.)
551 * In later passes (level > 2), we join rels of the previous level
552 * to each initial rel they don't already include but have a join
553 * clause or restriction with.
555 ListCell *other_rels;
557 if (level == 2) /* consider remaining initial rels */
558 other_rels = lnext(r);
559 else /* consider all initial rels */
560 other_rels = list_head(joinrels[1]);
562 make_rels_by_clause_joins(root,
569 * Oops, we have a relation that is not joined to any other
570 * relation, either directly or by join-order restrictions.
571 * Cartesian product time.
573 * We consider a cartesian product with each not-already-included
574 * initial rel, whether it has other join clauses or not. At
575 * level 2, if there are two or more clauseless initial rels, we
576 * will redundantly consider joining them in both directions; but
577 * such cases aren't common enough to justify adding complexity to
578 * avoid the duplicated effort.
580 make_rels_by_clauseless_joins(root,
582 list_head(joinrels[1]));
587 * Now, consider "bushy plans" in which relations of k initial rels are
588 * joined to relations of level-k initial rels, for 2 <= k <= level-2.
590 * We only consider bushy-plan joins for pairs of rels where there is a
591 * suitable join clause (or join order restriction), in order to avoid
592 * unreasonable growth of planning time.
/* NOTE(review): the enclosing for-loop over k (2 <= k <= level-2) appears elided from this listing. */
596 int other_level = level - k;
599 * Since make_join_rel(x, y) handles both x,y and y,x cases, we only
600 * need to go as far as the halfway point.
605 foreach(r, joinrels[k])
607 RelOptInfo *old_rel = (RelOptInfo *) lfirst(r);
608 ListCell *other_rels;
612 * We can ignore relations without join clauses here, unless they
613 * participate in join-order restrictions --- then we might have
614 * to force a bushy join plan.
616 if (old_rel->joininfo == NIL && !old_rel->has_eclass_joins &&
617 !has_join_restriction(root, old_rel))
620 if (k == other_level)
621 other_rels = lnext(r); /* only consider remaining rels */
623 other_rels = list_head(joinrels[other_level]);
625 for_each_cell(r2, other_rels)
627 RelOptInfo *new_rel = (RelOptInfo *) lfirst(r2);
629 if (!bms_overlap(old_rel->relids, new_rel->relids))
632 * OK, we can build a rel of the right level from this
633 * pair of rels. Do so if there is at least one relevant
634 * join clause or join order restriction.
636 if (have_relevant_joinclause(root, old_rel, new_rel) ||
637 have_join_order_restriction(root, old_rel, new_rel))
639 (void) make_join_rel(root, old_rel, new_rel);
647 * Last-ditch effort: if we failed to find any usable joins so far, force
648 * a set of cartesian-product joins to be generated. This handles the
649 * special case where all the available rels have join clauses but we
650 * cannot use any of those clauses yet. This can only happen when we are
651 * considering a join sub-problem (a sub-joinlist) and all the rels in the
652 * sub-problem have only join clauses with rels outside the sub-problem.
655 * SELECT ... FROM a INNER JOIN b ON TRUE, c, d, ...
656 * WHERE a.w = c.x and b.y = d.z;
658 * If the "a INNER JOIN b" sub-problem does not get flattened into the
659 * upper level, we must be willing to make a cartesian join of a and b;
660 * but the code above will not have done so, because it thought that both
661 * a and b have joinclauses. We consider only left-sided and right-sided
662 * cartesian joins in this case (no bushy).
665 if (joinrels[level] == NIL)
668 * This loop is just like the first one, except we always call
669 * make_rels_by_clauseless_joins().
671 foreach(r, joinrels[level - 1])
673 RelOptInfo *old_rel = (RelOptInfo *) lfirst(r);
675 make_rels_by_clauseless_joins(root,
677 list_head(joinrels[1]));
681 * When special joins are involved, there may be no legal way
682 * to make an N-way join for some values of N. For example consider
684 * SELECT ... FROM t1 WHERE
685 * x IN (SELECT ... FROM t2,t3 WHERE ...) AND
686 * y IN (SELECT ... FROM t4,t5 WHERE ...)
688 * We will flatten this query to a 5-way join problem, but there are
689 * no 4-way joins that join_is_legal() will consider legal. We have
690 * to accept failure at level 4 and go on to discover a workable
691 * bushy plan at level 5.
693 * However, if there are no special joins then join_is_legal() should
694 * never fail, and so the following sanity check is useful.
697 if (joinrels[level] == NIL && root->join_info_list == NIL)
698 elog(ERROR, "failed to build any %d-way joins", level);
703 * make_rels_by_clause_joins
704 * Build joins between the given relation 'old_rel' and other relations
705 * that participate in join clauses that 'old_rel' also participates in
706 * (or participate in join-order restrictions with it).
707 * The join rels are returned in root->join_rel_level[join_cur_level].
709 * Note: at levels above 2 we will generate the same joined relation in
710 * multiple ways --- for example (a join b) join c is the same RelOptInfo as
711 * (b join c) join a, though the second case will add a different set of Paths
712 * to it. This is the reason for using the join_rel_level mechanism, which
713 * automatically ensures that each new joinrel is only added to the list once.
715 * 'old_rel' is the relation entry for the relation to be joined
716 * 'other_rels': the first cell in a linked list containing the other
717 * rels to be considered for joining
719 * Currently, this is only used with initial rels in other_rels, but it
720 * will work for joining to joinrels too.
/* NOTE(review): elided listing -- the 'old_rel' parameter line appears missing from view. */
723 make_rels_by_clause_joins(PlannerInfo *root,
725 ListCell *other_rels)
729 for_each_cell(l, other_rels)
731 RelOptInfo *other_rel = (RelOptInfo *) lfirst(l);
/* Join only disjoint pairs that share a clause or an ordering restriction. */
733 if (!bms_overlap(old_rel->relids, other_rel->relids) &&
734 (have_relevant_joinclause(root, old_rel, other_rel) ||
735 have_join_order_restriction(root, old_rel, other_rel)))
737 (void) make_join_rel(root, old_rel, other_rel);
743 * make_rels_by_clauseless_joins
744 * Given a relation 'old_rel' and a list of other relations
745 * 'other_rels', create a join relation between 'old_rel' and each
746 * member of 'other_rels' that isn't already included in 'old_rel'.
747 * The join rels are returned in root->join_rel_level[join_cur_level].
749 * 'old_rel' is the relation entry for the relation to be joined
750 * 'other_rels': the first cell of a linked list containing the
751 * other rels to be considered for joining
753 * Currently, this is only used with initial rels in other_rels, but it would
754 * work for joining to joinrels too.
/* NOTE(review): elided listing -- the 'old_rel' parameter line appears missing from view. */
757 make_rels_by_clauseless_joins(PlannerInfo *root,
759 ListCell *other_rels)
763 for_each_cell(l, other_rels)
765 RelOptInfo *other_rel = (RelOptInfo *) lfirst(l);
/* Cartesian product: no clause required, only disjointness. */
767 if (!bms_overlap(other_rel->relids, old_rel->relids))
769 (void) make_join_rel(root, old_rel, other_rel);
776 * Determine whether a proposed join is legal given the query's
777 * join order constraints; and if it is, determine the join type.
779 * Caller must supply not only the two rels, but the union of their relids.
780 * (We could simplify the API by computing joinrelids locally, but this
781 * would be redundant work in the normal path through make_join_rel.)
783 * On success, *sjinfo_p is set to NULL if this is to be a plain inner join,
784 * else it's set to point to the associated SpecialJoinInfo node. Also,
785 * *reversed_p is set TRUE if the given relations need to be swapped to
786 * match the SpecialJoinInfo node.
/* NOTE(review): elided listing -- the 'joinrelids' parameter line and several statements (match assignments, returns) are missing from view. */
789 join_is_legal(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2,
791 SpecialJoinInfo **sjinfo_p, bool *reversed_p)
793 SpecialJoinInfo *match_sjinfo;
800 * Ensure output params are set on failure return. This is just to
801 * suppress uninitialized-variable warnings from overly anal compilers.
807 * If we have any special joins, the proposed join might be illegal; and
808 * in any case we have to determine its join type. Scan the join info
809 * list for conflicts.
813 unique_ified = false;
814 is_valid_inner = true;
816 foreach(l, root->join_info_list)
818 SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) lfirst(l);
821 * This special join is not relevant unless its RHS overlaps the
822 * proposed join. (Check this first as a fast path for dismissing
823 * most irrelevant SJs quickly.)
825 if (!bms_overlap(sjinfo->min_righthand, joinrelids))
829 * Also, not relevant if proposed join is fully contained within RHS
830 * (ie, we're still building up the RHS).
832 if (bms_is_subset(joinrelids, sjinfo->min_righthand))
836 * Also, not relevant if SJ is already done within either input.
838 if (bms_is_subset(sjinfo->min_lefthand, rel1->relids) &&
839 bms_is_subset(sjinfo->min_righthand, rel1->relids))
841 if (bms_is_subset(sjinfo->min_lefthand, rel2->relids) &&
842 bms_is_subset(sjinfo->min_righthand, rel2->relids))
846 * If it's a semijoin and we already joined the RHS to any other rels
847 * within either input, then we must have unique-ified the RHS at that
848 * point (see below). Therefore the semijoin is no longer relevant in
851 if (sjinfo->jointype == JOIN_SEMI)
853 if (bms_is_subset(sjinfo->syn_righthand, rel1->relids) &&
854 !bms_equal(sjinfo->syn_righthand, rel1->relids))
856 if (bms_is_subset(sjinfo->syn_righthand, rel2->relids) &&
857 !bms_equal(sjinfo->syn_righthand, rel2->relids))
862 * If one input contains min_lefthand and the other contains
863 * min_righthand, then we can perform the SJ at this join.
865 * Barf if we get matches to more than one SJ (is that possible?)
867 if (bms_is_subset(sjinfo->min_lefthand, rel1->relids) &&
868 bms_is_subset(sjinfo->min_righthand, rel2->relids))
871 return false; /* invalid join path */
872 match_sjinfo = sjinfo;
875 else if (bms_is_subset(sjinfo->min_lefthand, rel2->relids) &&
876 bms_is_subset(sjinfo->min_righthand, rel1->relids))
879 return false; /* invalid join path */
880 match_sjinfo = sjinfo;
883 else if (sjinfo->jointype == JOIN_SEMI &&
884 bms_equal(sjinfo->syn_righthand, rel2->relids) &&
885 create_unique_path(root, rel2, rel2->cheapest_total_path,
889 * For a semijoin, we can join the RHS to anything else by
890 * unique-ifying the RHS (if the RHS can be unique-ified).
891 * We will only get here if we have the full RHS but less
892 * than min_lefthand on the LHS.
894 * The reason to consider such a join path is exemplified by
895 * SELECT ... FROM a,b WHERE (a.x,b.y) IN (SELECT c1,c2 FROM c)
896 * If we insist on doing this as a semijoin we will first have
897 * to form the cartesian product of A*B. But if we unique-ify
898 * C then the semijoin becomes a plain innerjoin and we can join
899 * in any order, eg C to A and then to B. When C is much smaller
900 * than A and B this can be a huge win. So we allow C to be
901 * joined to just A or just B here, and then make_join_rel has
902 * to handle the case properly.
904 * Note that actually we'll allow unique-ified C to be joined to
905 * some other relation D here, too. That is legal, if usually not
906 * very sane, and this routine is only concerned with legality not
907 * with whether the join is good strategy.
911 return false; /* invalid join path */
912 match_sjinfo = sjinfo;
916 else if (sjinfo->jointype == JOIN_SEMI &&
917 bms_equal(sjinfo->syn_righthand, rel1->relids) &&
918 create_unique_path(root, rel1, rel1->cheapest_total_path,
921 /* Reversed semijoin case */
923 return false; /* invalid join path */
924 match_sjinfo = sjinfo;
931 * Otherwise, the proposed join overlaps the RHS but isn't
932 * a valid implementation of this SJ. It might still be
933 * a legal join, however. If both inputs overlap the RHS,
934 * assume that it's OK. Since the inputs presumably got past
935 * this function's checks previously, they can't overlap the
936 * LHS and their violations of the RHS boundary must represent
937 * SJs that have been determined to commute with this one.
938 * We have to allow this to work correctly in cases like
939 * (a LEFT JOIN (b JOIN (c LEFT JOIN d)))
940 * when the c/d join has been determined to commute with the join
941 * to a, and hence d is not part of min_righthand for the upper
942 * join. It should be legal to join b to c/d but this will appear
943 * as a violation of the upper join's RHS.
944 * Furthermore, if one input overlaps the RHS and the other does
945 * not, we should still allow the join if it is a valid
946 * implementation of some other SJ. We have to allow this to
947 * support the associative identity
948 * (a LJ b on Pab) LJ c ON Pbc = a LJ (b LJ c ON Pbc) on Pab
949 * since joining B directly to C violates the lower SJ's RHS.
950 * We assume that make_outerjoininfo() set things up correctly
951 * so that we'll only match to some SJ if the join is valid.
952 * Set flag here to check at bottom of loop.
955 if (sjinfo->jointype != JOIN_SEMI &&
956 bms_overlap(rel1->relids, sjinfo->min_righthand) &&
957 bms_overlap(rel2->relids, sjinfo->min_righthand))
960 Assert(!bms_overlap(joinrelids, sjinfo->min_lefthand));
963 is_valid_inner = false;
968 * Fail if violated some SJ's RHS and didn't match to another SJ. However,
969 * "matching" to a semijoin we are implementing by unique-ification
970 * doesn't count (think: it's really an inner join).
972 if (!is_valid_inner &&
973 (match_sjinfo == NULL || unique_ified))
974 return false; /* invalid join path */
976 /* Otherwise, it's a valid join */
977 *sjinfo_p = match_sjinfo;
978 *reversed_p = reversed;
/* A 'return true;' presumably follows here -- elided from this listing. */
983 * has_join_restriction
984 * Detect whether the specified relation has join-order restrictions
985 * due to being inside an outer join or an IN (sub-SELECT).
987 * Essentially, this tests whether have_join_order_restriction() could
988 * succeed with this rel and some other one. It's OK if we sometimes
989 * say "true" incorrectly. (Therefore, we don't bother with the relatively
990 * expensive has_legal_joinclause test.)
/* NOTE(review): elided listing -- return type, braces and return statements missing from view. */
993 has_join_restriction(PlannerInfo *root, RelOptInfo *rel)
997 foreach(l, root->join_info_list)
999 SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) lfirst(l);
1001 /* ignore full joins --- other mechanisms preserve their ordering */
1002 if (sjinfo->jointype == JOIN_FULL)
1005 /* ignore if SJ is already contained in rel */
1006 if (bms_is_subset(sjinfo->min_lefthand, rel->relids) &&
1007 bms_is_subset(sjinfo->min_righthand, rel->relids))
1010 /* restricted if it overlaps LHS or RHS, but doesn't contain SJ */
1011 if (bms_overlap(sjinfo->min_lefthand, rel->relids) ||
1012 bms_overlap(sjinfo->min_righthand, rel->relids))
1020 * is_dummy_rel --- has relation been proven empty?
/* Thin function wrapper around the IS_DUMMY_REL() macro. */
1023 is_dummy_rel(RelOptInfo *rel)
1025 return IS_DUMMY_REL(rel);
1029 * Mark a relation as proven empty.
1031 * During GEQO planning, this can get invoked more than once on the same
1032 * baserel struct, so it's worth checking to see if the rel is already marked
1035 * Also, when called during GEQO join planning, we are in a short-lived
1036 * memory context. We must make sure that the dummy path attached to a
1037 * baserel survives the GEQO cycle, else the baserel is trashed for future
1038 * GEQO cycles. On the other hand, when we are marking a joinrel during GEQO,
1039 * we don't want the dummy path to clutter the main planning context. Upshot
1040 * is that the best solution is to explicitly make the dummy path in the same
1041 * context the given RelOptInfo is in.
/* NOTE(review): elided listing -- braces and some statements (e.g. the rows estimate, set_cheapest) missing from view. */
1044 mark_dummy_rel(RelOptInfo *rel)
1046 MemoryContext oldcontext;
1048 /* Already marked? */
1049 if (is_dummy_rel(rel))
1052 /* No, so choose correct context to make the dummy path in */
1053 oldcontext = MemoryContextSwitchTo(GetMemoryChunkContext(rel));
1055 /* Set dummy size estimate */
1058 /* Evict any previously chosen paths */
1059 rel->pathlist = NIL;
1061 /* Set up the dummy path */
1062 add_path(rel, (Path *) create_append_path(rel, NIL, NULL));
1064 /* Set or update cheapest_total_path and related fields */
1067 MemoryContextSwitchTo(oldcontext);
1071 * restriction_is_constant_false --- is a restrictlist just FALSE?
1073 * In cases where a qual is provably constant FALSE, eval_const_expressions
1074 * will generally have thrown away anything that's ANDed with it. In outer
1075 * join situations this will leave us computing cartesian products only to
1076 * decide there's no match for an outer row, which is pretty stupid. So,
1077 * we need to detect the case.
1079 * If only_pushed_down is TRUE, then consider only pushed-down quals.
1082 restriction_is_constant_false(List *restrictlist, bool only_pushed_down)
1087 * Despite the above comment, the restriction list we see here might
1088 * possibly have other members besides the FALSE constant, since other
1089 * quals could get "pushed down" to the outer join level. So we check
1090 * each member of the list.
1092 foreach(lc, restrictlist)
1094 RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
1096 Assert(IsA(rinfo, RestrictInfo));
1097 if (only_pushed_down && !rinfo->is_pushed_down)
1100 if (rinfo->clause && IsA(rinfo->clause, Const))
1102 Const *con = (Const *) rinfo->clause;
1104 /* constant NULL is as good as constant FALSE for our purposes */
1105 if (con->constisnull)
1107 if (!DatumGetBool(con->constvalue))