1 /*-------------------------------------------------------------------------
4 * Routines copied from PostgreSQL core distribution.
7 * The main purpose of this file is having access to static functions in core.
8 * Another purpose is tweaking functions behavior by replacing part of them by
9 * macro definitions. See at the end of pg_hint_plan.c for details. Anyway,
10 * this file *must* contain required functions without making any change.
12 * This file contains the following functions from corresponding files.
14 * src/backend/optimizer/path/allpaths.c
17 * set_plain_rel_pathlist()
18 * set_append_rel_pathlist()
19 * add_paths_to_append_rel()
20 * generate_mergeappend_paths()
21 * get_cheapest_parameterized_child_path()
22 * accumulate_append_subpath()
25 * standard_join_search(): This function is not static. The reason for
26 * including this function is make_rels_by_clause_joins. In order to
27 * avoid generating apparently unwanted join combination, we decided to
28 * change the behavior of make_join_rel, which is called under this
31 * src/backend/optimizer/path/joinrels.c
34 * join_search_one_level(): We have to modify this to call my definition of
35 * make_rels_by_clause_joins.
38 * make_rels_by_clause_joins()
39 * make_rels_by_clauseless_joins()
41 * has_join_restriction()
44 * restriction_is_constant_false()
47 * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
48 * Portions Copyright (c) 1994, Regents of the University of California
50 *-------------------------------------------------------------------------
55 * set_plain_rel_pathlist
56 * Build access paths for a plain relation (no subquery, no inheritance)
59 set_plain_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
61 Relids required_outer;
64 * We don't support pushing join clauses into the quals of a seqscan, but
65 * it could still have required parameterization due to LATERAL refs in
68 required_outer = rel->lateral_relids;
70 /* Consider sequential scan */
71 add_path(rel, create_seqscan_path(root, rel, required_outer, 0));
73 /* If appropriate, consider parallel sequential scan */
74 if (rel->consider_parallel && required_outer == NULL)
75 create_plain_partial_paths(root, rel);
77 /* Consider index scans */
78 create_index_paths(root, rel);
80 /* Consider TID scans */
81 create_tidscan_paths(root, rel);
86 * set_append_rel_pathlist
87 * Build access paths for an "append relation"
90 set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
91 Index rti, RangeTblEntry *rte)
93 int parentRTindex = rti;
94 List *live_childrels = NIL;
98 * Generate access paths for each member relation, and remember the
101 foreach(l, root->append_rel_list)
103 AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(l);
105 RangeTblEntry *childRTE;
106 RelOptInfo *childrel;
108 /* append_rel_list contains all append rels; ignore others */
109 if (appinfo->parent_relid != parentRTindex)
112 /* Re-locate the child RTE and RelOptInfo */
113 childRTindex = appinfo->child_relid;
114 childRTE = root->simple_rte_array[childRTindex];
115 childrel = root->simple_rel_array[childRTindex];
118 * If set_append_rel_size() decided the parent appendrel was
119 * parallel-unsafe at some point after visiting this child rel, we
120 * need to propagate the unsafety marking down to the child, so that
121 * we don't generate useless partial paths for it.
123 if (!rel->consider_parallel)
124 childrel->consider_parallel = false;
127 * Compute the child's access paths.
129 set_rel_pathlist(root, childrel, childRTindex, childRTE);
132 * If child is dummy, ignore it.
134 if (IS_DUMMY_REL(childrel))
138 * Child is live, so add it to the live_childrels list for use below.
140 live_childrels = lappend(live_childrels, childrel);
143 /* Add paths to the "append" relation. */
144 add_paths_to_append_rel(root, rel, live_childrels);
148 * add_paths_to_append_rel
149 * Generate paths for given "append" relation given the set of non-dummy
152 * The function collects all parameterizations and orderings supported by the
153 * non-dummy children. For every such parameterization or ordering, it creates
154 * an append path collecting one path from each non-dummy child with given
155 * parameterization or ordering. Similarly it collects partial paths from
156 * non-dummy children to create partial append paths.
159 add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel,
160 List *live_childrels)
162 List *subpaths = NIL;
163 bool subpaths_valid = true;
164 List *partial_subpaths = NIL;
165 bool partial_subpaths_valid = true;
166 List *all_child_pathkeys = NIL;
167 List *all_child_outers = NIL;
169 List *partitioned_rels = NIL;
171 bool build_partitioned_rels = false;
174 * A plain relation will already have a PartitionedChildRelInfo if it is
175 * partitioned. For a subquery RTE, no PartitionedChildRelInfo exists; we
176 * collect all partitioned_rels associated with any child. (This assumes
177 * that we don't need to look through multiple levels of subquery RTEs; if
178 * we ever do, we could create a PartitionedChildRelInfo with the
179 * accumulated list of partitioned_rels which would then be found when
180 * populated our parent rel with paths. For the present, that appears to
183 rte = planner_rt_fetch(rel->relid, root);
184 switch (rte->rtekind)
187 if (rte->relkind == RELKIND_PARTITIONED_TABLE)
190 get_partitioned_child_rels(root, rel->relid);
191 Assert(list_length(partitioned_rels) >= 1);
195 build_partitioned_rels = true;
198 elog(ERROR, "unexpcted rtekind: %d", (int) rte->rtekind);
202 * For every non-dummy child, remember the cheapest path. Also, identify
203 * all pathkeys (orderings) and parameterizations (required_outer sets)
204 * available for the non-dummy member relations.
206 foreach(l, live_childrels)
208 RelOptInfo *childrel = lfirst(l);
212 * If we need to build partitioned_rels, accumulate the partitioned
213 * rels for this child.
215 if (build_partitioned_rels)
219 cprels = get_partitioned_child_rels(root, childrel->relid);
220 partitioned_rels = list_concat(partitioned_rels,
225 * If child has an unparameterized cheapest-total path, add that to
226 * the unparameterized Append path we are constructing for the parent.
227 * If not, there's no workable unparameterized path.
229 if (childrel->cheapest_total_path->param_info == NULL)
230 subpaths = accumulate_append_subpath(subpaths,
231 childrel->cheapest_total_path);
233 subpaths_valid = false;
235 /* Same idea, but for a partial plan. */
236 if (childrel->partial_pathlist != NIL)
237 partial_subpaths = accumulate_append_subpath(partial_subpaths,
238 linitial(childrel->partial_pathlist));
240 partial_subpaths_valid = false;
243 * Collect lists of all the available path orderings and
244 * parameterizations for all the children. We use these as a
245 * heuristic to indicate which sort orderings and parameterizations we
246 * should build Append and MergeAppend paths for.
248 foreach(lcp, childrel->pathlist)
250 Path *childpath = (Path *) lfirst(lcp);
251 List *childkeys = childpath->pathkeys;
252 Relids childouter = PATH_REQ_OUTER(childpath);
254 /* Unsorted paths don't contribute to pathkey list */
255 if (childkeys != NIL)
260 /* Have we already seen this ordering? */
261 foreach(lpk, all_child_pathkeys)
263 List *existing_pathkeys = (List *) lfirst(lpk);
265 if (compare_pathkeys(existing_pathkeys,
266 childkeys) == PATHKEYS_EQUAL)
274 /* No, so add it to all_child_pathkeys */
275 all_child_pathkeys = lappend(all_child_pathkeys,
280 /* Unparameterized paths don't contribute to param-set list */
286 /* Have we already seen this param set? */
287 foreach(lco, all_child_outers)
289 Relids existing_outers = (Relids) lfirst(lco);
291 if (bms_equal(existing_outers, childouter))
299 /* No, so add it to all_child_outers */
300 all_child_outers = lappend(all_child_outers,
308 * If we found unparameterized paths for all children, build an unordered,
309 * unparameterized Append path for the rel. (Note: this is correct even
310 * if we have zero or one live subpath due to constraint exclusion.)
313 add_path(rel, (Path *) create_append_path(rel, subpaths, NULL, 0,
317 * Consider an append of partial unordered, unparameterized partial paths.
319 if (partial_subpaths_valid)
321 AppendPath *appendpath;
323 int parallel_workers = 0;
326 * Decide on the number of workers to request for this append path.
327 * For now, we just use the maximum value from among the members. It
328 * might be useful to use a higher number if the Append node were
329 * smart enough to spread out the workers, but it currently isn't.
331 foreach(lc, partial_subpaths)
333 Path *path = lfirst(lc);
335 parallel_workers = Max(parallel_workers, path->parallel_workers);
337 Assert(parallel_workers > 0);
339 /* Generate a partial append path. */
340 appendpath = create_append_path(rel, partial_subpaths, NULL,
341 parallel_workers, partitioned_rels);
342 add_partial_path(rel, (Path *) appendpath);
346 * Also build unparameterized MergeAppend paths based on the collected
347 * list of child pathkeys.
350 generate_mergeappend_paths(root, rel, live_childrels,
355 * Build Append paths for each parameterization seen among the child rels.
356 * (This may look pretty expensive, but in most cases of practical
357 * interest, the child rels will expose mostly the same parameterizations,
358 * so that not that many cases actually get considered here.)
360 * The Append node itself cannot enforce quals, so all qual checking must
361 * be done in the child paths. This means that to have a parameterized
362 * Append path, we must have the exact same parameterization for each
363 * child path; otherwise some children might be failing to check the
364 * moved-down quals. To make them match up, we can try to increase the
365 * parameterization of lesser-parameterized paths.
367 foreach(l, all_child_outers)
369 Relids required_outer = (Relids) lfirst(l);
372 /* Select the child paths for an Append with this parameterization */
374 subpaths_valid = true;
375 foreach(lcr, live_childrels)
377 RelOptInfo *childrel = (RelOptInfo *) lfirst(lcr);
380 subpath = get_cheapest_parameterized_child_path(root,
385 /* failed to make a suitable path for this child */
386 subpaths_valid = false;
389 subpaths = accumulate_append_subpath(subpaths, subpath);
393 add_path(rel, (Path *)
394 create_append_path(rel, subpaths, required_outer, 0,
401 * generate_mergeappend_paths
402 * Generate MergeAppend paths for an append relation
404 * Generate a path for each ordering (pathkey list) appearing in
405 * all_child_pathkeys.
407 * We consider both cheapest-startup and cheapest-total cases, ie, for each
408 * interesting ordering, collect all the cheapest startup subpaths and all the
409 * cheapest total paths, and build a MergeAppend path for each case.
411 * We don't currently generate any parameterized MergeAppend paths. While
412 * it would not take much more code here to do so, it's very unclear that it
413 * is worth the planning cycles to investigate such paths: there's little
414 * use for an ordered path on the inside of a nestloop. In fact, it's likely
415 * that the current coding of add_path would reject such paths out of hand,
416 * because add_path gives no credit for sort ordering of parameterized paths,
417 * and a parameterized MergeAppend is going to be more expensive than the
418 * corresponding parameterized Append path. If we ever try harder to support
419 * parameterized mergejoin plans, it might be worth adding support for
420 * parameterized MergeAppends to feed such joins. (See notes in
421 * optimizer/README for why that might not ever happen, though.)
424 generate_mergeappend_paths(PlannerInfo *root, RelOptInfo *rel,
425 List *live_childrels,
426 List *all_child_pathkeys,
427 List *partitioned_rels)
431 foreach(lcp, all_child_pathkeys)
433 List *pathkeys = (List *) lfirst(lcp);
434 List *startup_subpaths = NIL;
435 List *total_subpaths = NIL;
436 bool startup_neq_total = false;
439 /* Select the child paths for this ordering... */
440 foreach(lcr, live_childrels)
442 RelOptInfo *childrel = (RelOptInfo *) lfirst(lcr);
443 Path *cheapest_startup,
446 /* Locate the right paths, if they are available. */
448 get_cheapest_path_for_pathkeys(childrel->pathlist,
454 get_cheapest_path_for_pathkeys(childrel->pathlist,
461 * If we can't find any paths with the right order just use the
462 * cheapest-total path; we'll have to sort it later.
464 if (cheapest_startup == NULL || cheapest_total == NULL)
466 cheapest_startup = cheapest_total =
467 childrel->cheapest_total_path;
468 /* Assert we do have an unparameterized path for this child */
469 Assert(cheapest_total->param_info == NULL);
473 * Notice whether we actually have different paths for the
474 * "cheapest" and "total" cases; frequently there will be no point
475 * in two create_merge_append_path() calls.
477 if (cheapest_startup != cheapest_total)
478 startup_neq_total = true;
481 accumulate_append_subpath(startup_subpaths, cheapest_startup);
483 accumulate_append_subpath(total_subpaths, cheapest_total);
486 /* ... and build the MergeAppend paths */
487 add_path(rel, (Path *) create_merge_append_path(root,
493 if (startup_neq_total)
494 add_path(rel, (Path *) create_merge_append_path(root,
505 * get_cheapest_parameterized_child_path
506 * Get cheapest path for this relation that has exactly the requested
509 * Returns NULL if unable to create such a path.
512 get_cheapest_parameterized_child_path(PlannerInfo *root, RelOptInfo *rel,
513 Relids required_outer)
519 * Look up the cheapest existing path with no more than the needed
520 * parameterization. If it has exactly the needed parameterization, we're
523 cheapest = get_cheapest_path_for_pathkeys(rel->pathlist,
528 Assert(cheapest != NULL);
529 if (bms_equal(PATH_REQ_OUTER(cheapest), required_outer))
533 * Otherwise, we can "reparameterize" an existing path to match the given
534 * parameterization, which effectively means pushing down additional
535 * joinquals to be checked within the path's scan. However, some existing
536 * paths might check the available joinquals already while others don't;
537 * therefore, it's not clear which existing path will be cheapest after
538 * reparameterization. We have to go through them all and find out.
541 foreach(lc, rel->pathlist)
543 Path *path = (Path *) lfirst(lc);
545 /* Can't use it if it needs more than requested parameterization */
546 if (!bms_is_subset(PATH_REQ_OUTER(path), required_outer))
550 * Reparameterization can only increase the path's cost, so if it's
551 * already more expensive than the current cheapest, forget it.
553 if (cheapest != NULL &&
554 compare_path_costs(cheapest, path, TOTAL_COST) <= 0)
557 /* Reparameterize if needed, then recheck cost */
558 if (!bms_equal(PATH_REQ_OUTER(path), required_outer))
560 path = reparameterize_path(root, path, required_outer, 1.0);
562 continue; /* failed to reparameterize this one */
563 Assert(bms_equal(PATH_REQ_OUTER(path), required_outer));
565 if (cheapest != NULL &&
566 compare_path_costs(cheapest, path, TOTAL_COST) <= 0)
570 /* We have a new best path */
574 /* Return the best path, or NULL if we found no suitable candidate */
580 * accumulate_append_subpath
581 * Add a subpath to the list being built for an Append or MergeAppend
583 * It's possible that the child is itself an Append or MergeAppend path, in
584 * which case we can "cut out the middleman" and just add its child paths to
585 * our own list. (We don't try to do this earlier because we need to apply
586 * both levels of transformation to the quals.)
588 * Note that if we omit a child MergeAppend in this way, we are effectively
589 * omitting a sort step, which seems fine: if the parent is to be an Append,
590 * its result would be unsorted anyway, while if the parent is to be a
591 * MergeAppend, there's no point in a separate sort on a child.
594 accumulate_append_subpath(List *subpaths, Path *path)
596 if (IsA(path, AppendPath))
598 AppendPath *apath = (AppendPath *) path;
600 /* list_copy is important here to avoid sharing list substructure */
601 return list_concat(subpaths, list_copy(apath->subpaths));
603 else if (IsA(path, MergeAppendPath))
605 MergeAppendPath *mpath = (MergeAppendPath *) path;
607 /* list_copy is important here to avoid sharing list substructure */
608 return list_concat(subpaths, list_copy(mpath->subpaths));
611 return lappend(subpaths, path);
616 * standard_join_search
617 * Find possible joinpaths for a query by successively finding ways
618 * to join component relations into join relations.
620 * 'levels_needed' is the number of iterations needed, ie, the number of
621 * independent jointree items in the query. This is > 1.
623 * 'initial_rels' is a list of RelOptInfo nodes for each independent
624 * jointree item. These are the components to be joined together.
625 * Note that levels_needed == list_length(initial_rels).
627 * Returns the final level of join relations, i.e., the relation that is
628 * the result of joining all the original relations together.
629 * At least one implementation path must be provided for this relation and
630 * all required sub-relations.
632 * To support loadable plugins that modify planner behavior by changing the
633 * join searching algorithm, we provide a hook variable that lets a plugin
634 * replace or supplement this function. Any such hook must return the same
635 * final join relation as the standard code would, but it might have a
636 * different set of implementation paths attached, and only the sub-joinrels
637 * needed for these paths need have been instantiated.
639 * Note to plugin authors: the functions invoked during standard_join_search()
640 * modify root->join_rel_list and root->join_rel_hash. If you want to do more
641 * than one join-order search, you'll probably need to save and restore the
642 * original states of those data structures. See geqo_eval() for an example.
645 standard_join_search(PlannerInfo *root, int levels_needed, List *initial_rels)
651 * This function cannot be invoked recursively within any one planning
652 * problem, so join_rel_level[] can't be in use already.
654 Assert(root->join_rel_level == NULL);
657 * We employ a simple "dynamic programming" algorithm: we first find all
658 * ways to build joins of two jointree items, then all ways to build joins
659 * of three items (from two-item joins and single items), then four-item
660 * joins, and so on until we have considered all ways to join all the
661 * items into one rel.
663 * root->join_rel_level[j] is a list of all the j-item rels. Initially we
664 * set root->join_rel_level[1] to represent all the single-jointree-item
667 root->join_rel_level = (List **) palloc0((levels_needed + 1) * sizeof(List *));
669 root->join_rel_level[1] = initial_rels;
671 for (lev = 2; lev <= levels_needed; lev++)
676 * Determine all possible pairs of relations to be joined at this
677 * level, and build paths for making each one from every available
678 * pair of lower-level relations.
680 join_search_one_level(root, lev);
683 * Run generate_gather_paths() for each just-processed joinrel. We
684 * could not do this earlier because both regular and partial paths
685 * can get added to a particular joinrel at multiple times within
686 * join_search_one_level. After that, we're done creating paths for
687 * the joinrel, so run set_cheapest().
689 foreach(lc, root->join_rel_level[lev])
691 rel = (RelOptInfo *) lfirst(lc);
693 /* Create GatherPaths for any useful partial paths for rel */
694 generate_gather_paths(root, rel);
696 /* Find and save the cheapest paths for this rel */
699 #ifdef OPTIMIZER_DEBUG
700 debug_print_rel(root, rel);
706 * We should have a single rel at the final level.
708 if (root->join_rel_level[levels_needed] == NIL)
709 elog(ERROR, "failed to build any %d-way joins", levels_needed);
710 Assert(list_length(root->join_rel_level[levels_needed]) == 1);
712 rel = (RelOptInfo *) linitial(root->join_rel_level[levels_needed]);
714 root->join_rel_level = NULL;
720 * create_plain_partial_paths
721 * Build partial access paths for parallel scan of a plain relation
724 create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel)
726 int parallel_workers;
728 parallel_workers = compute_parallel_worker(rel, rel->pages, -1);
730 /* If any limit was set to zero, the user doesn't want a parallel scan. */
731 if (parallel_workers <= 0)
734 /* Add an unordered partial path based on a parallel sequential scan. */
735 add_partial_path(rel, create_seqscan_path(root, rel, NULL, parallel_workers));
740 * join_search_one_level
741 * Consider ways to produce join relations containing exactly 'level'
742 * jointree items. (This is one step of the dynamic-programming method
743 * embodied in standard_join_search.) Join rel nodes for each feasible
744 * combination of lower-level rels are created and returned in a list.
745 * Implementation paths are created for each such joinrel, too.
747 * level: level of rels we want to make this time
748 * root->join_rel_level[j], 1 <= j < level, is a list of rels containing j items
750 * The result is returned in root->join_rel_level[level].
753 join_search_one_level(PlannerInfo *root, int level)
755 List **joinrels = root->join_rel_level;
759 Assert(joinrels[level] == NIL);
761 /* Set join_cur_level so that new joinrels are added to proper list */
762 root->join_cur_level = level;
765 * First, consider left-sided and right-sided plans, in which rels of
766 * exactly level-1 member relations are joined against initial relations.
767 * We prefer to join using join clauses, but if we find a rel of level-1
768 * members that has no join clauses, we will generate Cartesian-product
769 * joins against all initial rels not already contained in it.
771 foreach(r, joinrels[level - 1])
773 RelOptInfo *old_rel = (RelOptInfo *) lfirst(r);
775 if (old_rel->joininfo != NIL || old_rel->has_eclass_joins ||
776 has_join_restriction(root, old_rel))
779 * There are join clauses or join order restrictions relevant to
780 * this rel, so consider joins between this rel and (only) those
781 * initial rels it is linked to by a clause or restriction.
783 * At level 2 this condition is symmetric, so there is no need to
784 * look at initial rels before this one in the list; we already
785 * considered such joins when we were at the earlier rel. (The
786 * mirror-image joins are handled automatically by make_join_rel.)
787 * In later passes (level > 2), we join rels of the previous level
788 * to each initial rel they don't already include but have a join
789 * clause or restriction with.
791 ListCell *other_rels;
793 if (level == 2) /* consider remaining initial rels */
794 other_rels = lnext(r);
795 else /* consider all initial rels */
796 other_rels = list_head(joinrels[1]);
798 make_rels_by_clause_joins(root,
805 * Oops, we have a relation that is not joined to any other
806 * relation, either directly or by join-order restrictions.
807 * Cartesian product time.
809 * We consider a cartesian product with each not-already-included
810 * initial rel, whether it has other join clauses or not. At
811 * level 2, if there are two or more clauseless initial rels, we
812 * will redundantly consider joining them in both directions; but
813 * such cases aren't common enough to justify adding complexity to
814 * avoid the duplicated effort.
816 make_rels_by_clauseless_joins(root,
818 list_head(joinrels[1]));
823 * Now, consider "bushy plans" in which relations of k initial rels are
824 * joined to relations of level-k initial rels, for 2 <= k <= level-2.
826 * We only consider bushy-plan joins for pairs of rels where there is a
827 * suitable join clause (or join order restriction), in order to avoid
828 * unreasonable growth of planning time.
832 int other_level = level - k;
835 * Since make_join_rel(x, y) handles both x,y and y,x cases, we only
836 * need to go as far as the halfway point.
841 foreach(r, joinrels[k])
843 RelOptInfo *old_rel = (RelOptInfo *) lfirst(r);
844 ListCell *other_rels;
848 * We can ignore relations without join clauses here, unless they
849 * participate in join-order restrictions --- then we might have
850 * to force a bushy join plan.
852 if (old_rel->joininfo == NIL && !old_rel->has_eclass_joins &&
853 !has_join_restriction(root, old_rel))
856 if (k == other_level)
857 other_rels = lnext(r); /* only consider remaining rels */
859 other_rels = list_head(joinrels[other_level]);
861 for_each_cell(r2, other_rels)
863 RelOptInfo *new_rel = (RelOptInfo *) lfirst(r2);
865 if (!bms_overlap(old_rel->relids, new_rel->relids))
868 * OK, we can build a rel of the right level from this
869 * pair of rels. Do so if there is at least one relevant
870 * join clause or join order restriction.
872 if (have_relevant_joinclause(root, old_rel, new_rel) ||
873 have_join_order_restriction(root, old_rel, new_rel))
875 (void) make_join_rel(root, old_rel, new_rel);
883 * Last-ditch effort: if we failed to find any usable joins so far, force
884 * a set of cartesian-product joins to be generated. This handles the
885 * special case where all the available rels have join clauses but we
886 * cannot use any of those clauses yet. This can only happen when we are
887 * considering a join sub-problem (a sub-joinlist) and all the rels in the
888 * sub-problem have only join clauses with rels outside the sub-problem.
891 * SELECT ... FROM a INNER JOIN b ON TRUE, c, d, ...
892 * WHERE a.w = c.x and b.y = d.z;
894 * If the "a INNER JOIN b" sub-problem does not get flattened into the
895 * upper level, we must be willing to make a cartesian join of a and b;
896 * but the code above will not have done so, because it thought that both
897 * a and b have joinclauses. We consider only left-sided and right-sided
898 * cartesian joins in this case (no bushy).
901 if (joinrels[level] == NIL)
904 * This loop is just like the first one, except we always call
905 * make_rels_by_clauseless_joins().
907 foreach(r, joinrels[level - 1])
909 RelOptInfo *old_rel = (RelOptInfo *) lfirst(r);
911 make_rels_by_clauseless_joins(root,
913 list_head(joinrels[1]));
917 * When special joins are involved, there may be no legal way
918 * to make an N-way join for some values of N. For example consider
920 * SELECT ... FROM t1 WHERE
921 * x IN (SELECT ... FROM t2,t3 WHERE ...) AND
922 * y IN (SELECT ... FROM t4,t5 WHERE ...)
924 * We will flatten this query to a 5-way join problem, but there are
925 * no 4-way joins that join_is_legal() will consider legal. We have
926 * to accept failure at level 4 and go on to discover a workable
927 * bushy plan at level 5.
929 * However, if there are no special joins and no lateral references
930 * then join_is_legal() should never fail, and so the following sanity
934 if (joinrels[level] == NIL &&
935 root->join_info_list == NIL &&
936 !root->hasLateralRTEs)
937 elog(ERROR, "failed to build any %d-way joins", level);
943 * make_rels_by_clause_joins
944 * Build joins between the given relation 'old_rel' and other relations
945 * that participate in join clauses that 'old_rel' also participates in
946 * (or participate in join-order restrictions with it).
947 * The join rels are returned in root->join_rel_level[join_cur_level].
949 * Note: at levels above 2 we will generate the same joined relation in
950 * multiple ways --- for example (a join b) join c is the same RelOptInfo as
951 * (b join c) join a, though the second case will add a different set of Paths
952 * to it. This is the reason for using the join_rel_level mechanism, which
953 * automatically ensures that each new joinrel is only added to the list once.
955 * 'old_rel' is the relation entry for the relation to be joined
956 * 'other_rels': the first cell in a linked list containing the other
957 * rels to be considered for joining
959 * Currently, this is only used with initial rels in other_rels, but it
960 * will work for joining to joinrels too.
963 make_rels_by_clause_joins(PlannerInfo *root,
965 ListCell *other_rels)
969 for_each_cell(l, other_rels)
971 RelOptInfo *other_rel = (RelOptInfo *) lfirst(l);
973 if (!bms_overlap(old_rel->relids, other_rel->relids) &&
974 (have_relevant_joinclause(root, old_rel, other_rel) ||
975 have_join_order_restriction(root, old_rel, other_rel)))
977 (void) make_join_rel(root, old_rel, other_rel);
984 * make_rels_by_clauseless_joins
985 * Given a relation 'old_rel' and a list of other relations
986 * 'other_rels', create a join relation between 'old_rel' and each
987 * member of 'other_rels' that isn't already included in 'old_rel'.
988 * The join rels are returned in root->join_rel_level[join_cur_level].
990 * 'old_rel' is the relation entry for the relation to be joined
991 * 'other_rels': the first cell of a linked list containing the
992 * other rels to be considered for joining
994 * Currently, this is only used with initial rels in other_rels, but it would
995 * work for joining to joinrels too.
998 make_rels_by_clauseless_joins(PlannerInfo *root,
1000 ListCell *other_rels)
1004 for_each_cell(l, other_rels)
1006 RelOptInfo *other_rel = (RelOptInfo *) lfirst(l);
1008 if (!bms_overlap(other_rel->relids, old_rel->relids))
1010 (void) make_join_rel(root, old_rel, other_rel);
1018 * Determine whether a proposed join is legal given the query's
1019 * join order constraints; and if it is, determine the join type.
1021 * Caller must supply not only the two rels, but the union of their relids.
1022 * (We could simplify the API by computing joinrelids locally, but this
1023 * would be redundant work in the normal path through make_join_rel.)
1025 * On success, *sjinfo_p is set to NULL if this is to be a plain inner join,
1026 * else it's set to point to the associated SpecialJoinInfo node. Also,
1027 * *reversed_p is set TRUE if the given relations need to be swapped to
1028 * match the SpecialJoinInfo node.
1031 join_is_legal(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2,
1033 SpecialJoinInfo **sjinfo_p, bool *reversed_p)
1035 SpecialJoinInfo *match_sjinfo;
1038 bool must_be_leftjoin;
1042 * Ensure output params are set on failure return. This is just to
1043 * suppress uninitialized-variable warnings from overly anal compilers.
1046 *reversed_p = false;
1049 * If we have any special joins, the proposed join might be illegal; and
1050 * in any case we have to determine its join type. Scan the join info
1051 * list for matches and conflicts.
1053 match_sjinfo = NULL;
1055 unique_ified = false;
1056 must_be_leftjoin = false;
1058 foreach(l, root->join_info_list)
1060 SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) lfirst(l);
1063 * This special join is not relevant unless its RHS overlaps the
1064 * proposed join. (Check this first as a fast path for dismissing
1065 * most irrelevant SJs quickly.)
1067 if (!bms_overlap(sjinfo->min_righthand, joinrelids))
1071 * Also, not relevant if proposed join is fully contained within RHS
1072 * (ie, we're still building up the RHS).
1074 if (bms_is_subset(joinrelids, sjinfo->min_righthand))
1078 * Also, not relevant if SJ is already done within either input.
1080 if (bms_is_subset(sjinfo->min_lefthand, rel1->relids) &&
1081 bms_is_subset(sjinfo->min_righthand, rel1->relids))
1083 if (bms_is_subset(sjinfo->min_lefthand, rel2->relids) &&
1084 bms_is_subset(sjinfo->min_righthand, rel2->relids))
1088 * If it's a semijoin and we already joined the RHS to any other rels
1089 * within either input, then we must have unique-ified the RHS at that
1090 * point (see below). Therefore the semijoin is no longer relevant in
1093 if (sjinfo->jointype == JOIN_SEMI)
1095 if (bms_is_subset(sjinfo->syn_righthand, rel1->relids) &&
1096 !bms_equal(sjinfo->syn_righthand, rel1->relids))
1098 if (bms_is_subset(sjinfo->syn_righthand, rel2->relids) &&
1099 !bms_equal(sjinfo->syn_righthand, rel2->relids))
1104 * If one input contains min_lefthand and the other contains
1105 * min_righthand, then we can perform the SJ at this join.
1107 * Reject if we get matches to more than one SJ; that implies we're
1108 * considering something that's not really valid.
1110 if (bms_is_subset(sjinfo->min_lefthand, rel1->relids) &&
1111 bms_is_subset(sjinfo->min_righthand, rel2->relids))
1114 return false; /* invalid join path */
1115 match_sjinfo = sjinfo;
1118 else if (bms_is_subset(sjinfo->min_lefthand, rel2->relids) &&
1119 bms_is_subset(sjinfo->min_righthand, rel1->relids))
1122 return false; /* invalid join path */
1123 match_sjinfo = sjinfo;
1126 else if (sjinfo->jointype == JOIN_SEMI &&
1127 bms_equal(sjinfo->syn_righthand, rel2->relids) &&
1128 create_unique_path(root, rel2, rel2->cheapest_total_path,
1132 * For a semijoin, we can join the RHS to anything else by
1133 * unique-ifying the RHS (if the RHS can be unique-ified).
1134 * We will only get here if we have the full RHS but less
1135 * than min_lefthand on the LHS.
1137 * The reason to consider such a join path is exemplified by
1138 * SELECT ... FROM a,b WHERE (a.x,b.y) IN (SELECT c1,c2 FROM c)
1139 * If we insist on doing this as a semijoin we will first have
1140 * to form the cartesian product of A*B. But if we unique-ify
1141 * C then the semijoin becomes a plain innerjoin and we can join
1142 * in any order, eg C to A and then to B. When C is much smaller
1143 * than A and B this can be a huge win. So we allow C to be
1144 * joined to just A or just B here, and then make_join_rel has
1145 * to handle the case properly.
1147 * Note that actually we'll allow unique-ified C to be joined to
1148 * some other relation D here, too. That is legal, if usually not
1149 * very sane, and this routine is only concerned with legality not
1150 * with whether the join is good strategy.
1154 return false; /* invalid join path */
1155 match_sjinfo = sjinfo;
1157 unique_ified = true;
1159 else if (sjinfo->jointype == JOIN_SEMI &&
1160 bms_equal(sjinfo->syn_righthand, rel1->relids) &&
1161 create_unique_path(root, rel1, rel1->cheapest_total_path,
1164 /* Reversed semijoin case */
1166 return false; /* invalid join path */
1167 match_sjinfo = sjinfo;
1169 unique_ified = true;
1174 * Otherwise, the proposed join overlaps the RHS but isn't a valid
1175 * implementation of this SJ. But don't panic quite yet: the RHS
1176 * violation might have occurred previously, in one or both input
1177 * relations, in which case we must have previously decided that
1178 * it was OK to commute some other SJ with this one. If we need
1179 * to perform this join to finish building up the RHS, rejecting
1180 * it could lead to not finding any plan at all. (This can occur
1181 * because of the heuristics elsewhere in this file that postpone
1182 * clauseless joins: we might not consider doing a clauseless join
1183 * within the RHS until after we've performed other, validly
1184 * commutable SJs with one or both sides of the clauseless join.)
1185 * This consideration boils down to the rule that if both inputs
1186 * overlap the RHS, we can allow the join --- they are either
1187 * fully within the RHS, or represent previously-allowed joins to
1190 if (bms_overlap(rel1->relids, sjinfo->min_righthand) &&
1191 bms_overlap(rel2->relids, sjinfo->min_righthand))
1192 continue; /* assume valid previous violation of RHS */
1195 * The proposed join could still be legal, but only if we're
1196 * allowed to associate it into the RHS of this SJ. That means
1197 * this SJ must be a LEFT join (not SEMI or ANTI, and certainly
1198 * not FULL) and the proposed join must not overlap the LHS.
1200 if (sjinfo->jointype != JOIN_LEFT ||
1201 bms_overlap(joinrelids, sjinfo->min_lefthand))
1202 return false; /* invalid join path */
1205 * To be valid, the proposed join must be a LEFT join; otherwise
1206 * it can't associate into this SJ's RHS. But we may not yet have
1207 * found the SpecialJoinInfo matching the proposed join, so we
1208 * can't test that yet. Remember the requirement for later.
1210 must_be_leftjoin = true;
1215 * Fail if violated any SJ's RHS and didn't match to a LEFT SJ: the
1216 * proposed join can't associate into an SJ's RHS.
1218 * Also, fail if the proposed join's predicate isn't strict; we're
1219 * essentially checking to see if we can apply outer-join identity 3, and
1220 * that's a requirement. (This check may be redundant with checks in
1221 * make_outerjoininfo, but I'm not quite sure, and it's cheap to test.)
1223 if (must_be_leftjoin &&
1224 (match_sjinfo == NULL ||
1225 match_sjinfo->jointype != JOIN_LEFT ||
1226 !match_sjinfo->lhs_strict))
1227 return false; /* invalid join path */
1230 * We also have to check for constraints imposed by LATERAL references.
1232 if (root->hasLateralRTEs)
1236 Relids join_lateral_rels;
1239 * The proposed rels could each contain lateral references to the
1240 * other, in which case the join is impossible. If there are lateral
1241 * references in just one direction, then the join has to be done with
1242 * a nestloop with the lateral referencer on the inside. If the join
1243 * matches an SJ that cannot be implemented by such a nestloop, the
1244 * join is impossible.
1246 * Also, if the lateral reference is only indirect, we should reject
1247 * the join; whatever rel(s) the reference chain goes through must be
1250 * Another case that might keep us from building a valid plan is the
1251 * implementation restriction described by have_dangerous_phv().
1253 lateral_fwd = bms_overlap(rel1->relids, rel2->lateral_relids);
1254 lateral_rev = bms_overlap(rel2->relids, rel1->lateral_relids);
1255 if (lateral_fwd && lateral_rev)
1256 return false; /* have lateral refs in both directions */
1259 /* has to be implemented as nestloop with rel1 on left */
1263 match_sjinfo->jointype == JOIN_FULL))
1264 return false; /* not implementable as nestloop */
1265 /* check there is a direct reference from rel2 to rel1 */
1266 if (!bms_overlap(rel1->relids, rel2->direct_lateral_relids))
1267 return false; /* only indirect refs, so reject */
1268 /* check we won't have a dangerous PHV */
1269 if (have_dangerous_phv(root, rel1->relids, rel2->lateral_relids))
1270 return false; /* might be unable to handle required PHV */
1272 else if (lateral_rev)
1274 /* has to be implemented as nestloop with rel2 on left */
1278 match_sjinfo->jointype == JOIN_FULL))
1279 return false; /* not implementable as nestloop */
1280 /* check there is a direct reference from rel1 to rel2 */
1281 if (!bms_overlap(rel2->relids, rel1->direct_lateral_relids))
1282 return false; /* only indirect refs, so reject */
1283 /* check we won't have a dangerous PHV */
1284 if (have_dangerous_phv(root, rel2->relids, rel1->lateral_relids))
1285 return false; /* might be unable to handle required PHV */
1289 * LATERAL references could also cause problems later on if we accept
1290 * this join: if the join's minimum parameterization includes any rels
1291 * that would have to be on the inside of an outer join with this join
1292 * rel, then it's never going to be possible to build the complete
1293 * query using this join. We should reject this join not only because
1294 * it'll save work, but because if we don't, the clauseless-join
1295 * heuristics might think that legality of this join means that some
1296 * other join rel need not be formed, and that could lead to failure
1297 * to find any plan at all. We have to consider not only rels that
1298 * are directly on the inner side of an OJ with the joinrel, but also
1299 * ones that are indirectly so, so search to find all such rels.
1301 join_lateral_rels = min_join_parameterization(root, joinrelids,
1303 if (join_lateral_rels)
1305 Relids join_plus_rhs = bms_copy(joinrelids);
1311 foreach(l, root->join_info_list)
1313 SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) lfirst(l);
1315 if (bms_overlap(sjinfo->min_lefthand, join_plus_rhs) &&
1316 !bms_is_subset(sjinfo->min_righthand, join_plus_rhs))
1318 join_plus_rhs = bms_add_members(join_plus_rhs,
1319 sjinfo->min_righthand);
1322 /* full joins constrain both sides symmetrically */
1323 if (sjinfo->jointype == JOIN_FULL &&
1324 bms_overlap(sjinfo->min_righthand, join_plus_rhs) &&
1325 !bms_is_subset(sjinfo->min_lefthand, join_plus_rhs))
1327 join_plus_rhs = bms_add_members(join_plus_rhs,
1328 sjinfo->min_lefthand);
1333 if (bms_overlap(join_plus_rhs, join_lateral_rels))
1334 return false; /* will not be able to join to some RHS rel */
1338 /* Otherwise, it's a valid join */
1339 *sjinfo_p = match_sjinfo;
1340 *reversed_p = reversed;
1346 * has_join_restriction
1347 *		Detect whether the specified relation has join-order restrictions,
1348 *		due to being inside an outer join or an IN (sub-SELECT),
1349 *		or participating in any LATERAL references or multi-rel PHVs.
1351 * Essentially, this tests whether have_join_order_restriction() could
1352 * succeed with this rel and some other one.  It's OK if we sometimes
1353 * say "true" incorrectly.  (Therefore, we don't bother with the relatively
1354 * expensive has_legal_joinclause test.)
1357 has_join_restriction(PlannerInfo *root, RelOptInfo *rel)
	/* Any LATERAL linkage, in either direction, is a join-order restriction */
1361 	if (rel->lateral_relids != NULL || rel->lateral_referencers != NULL)
	/*
	 * A PlaceHolderVar whose eval set strictly contains this rel forces the
	 * rel to be joined with the rest of that eval set before the PHV can be
	 * computed, so that counts as a restriction too.
	 */
1364 	foreach(l, root->placeholder_list)
1366 		PlaceHolderInfo *phinfo = (PlaceHolderInfo *) lfirst(l);
1368 		if (bms_is_subset(rel->relids, phinfo->ph_eval_at) &&
1369 			!bms_equal(rel->relids, phinfo->ph_eval_at))
	/* Finally, scan the special-join list for SJs whose sides touch rel */
1373 	foreach(l, root->join_info_list)
1375 		SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) lfirst(l);
1377 		/* ignore full joins --- other mechanisms preserve their ordering */
1378 		if (sjinfo->jointype == JOIN_FULL)
1381 		/* ignore if SJ is already contained in rel */
1382 		if (bms_is_subset(sjinfo->min_lefthand, rel->relids) &&
1383 			bms_is_subset(sjinfo->min_righthand, rel->relids))
1386 		/* restricted if it overlaps LHS or RHS, but doesn't contain SJ */
1387 		if (bms_overlap(sjinfo->min_lefthand, rel->relids) ||
1388 			bms_overlap(sjinfo->min_righthand, rel->relids))
1397 * is_dummy_rel --- has relation been proven empty?
1400 is_dummy_rel(RelOptInfo *rel)
	/* Thin wrapper: simply delegates to the core IS_DUMMY_REL() macro. */
1402 	return IS_DUMMY_REL(rel);
1407 * Mark a relation as proven empty.
1409 * During GEQO planning, this can get invoked more than once on the same
1410 * baserel struct, so it's worth checking to see if the rel is already marked
1413 * Also, when called during GEQO join planning, we are in a short-lived
1414 * memory context.  We must make sure that the dummy path attached to a
1415 * baserel survives the GEQO cycle, else the baserel is trashed for future
1416 * GEQO cycles.  On the other hand, when we are marking a joinrel during GEQO,
1417 * we don't want the dummy path to clutter the main planning context.  Upshot
1418 * is that the best solution is to explicitly make the dummy path in the same
1419 * context the given RelOptInfo is in.
1422 mark_dummy_rel(RelOptInfo *rel)
1424 	MemoryContext oldcontext;
1426 	/* Already marked?  If so, this is a no-op (see GEQO note above). */
1427 	if (is_dummy_rel(rel))
1430 	/* No, so choose correct context to make the dummy path in */
1431 	oldcontext = MemoryContextSwitchTo(GetMemoryChunkContext(rel));
1433 	/* Set dummy size estimate */
1436 	/* Evict any previously chosen paths */
1437 	rel->pathlist = NIL;
1438 	rel->partial_pathlist = NIL;
1440 	/* Set up the dummy path: an Append with no children yields zero rows */
1441 	add_path(rel, (Path *) create_append_path(rel, NIL, NULL, 0, NIL));
1443 	/* Set or update cheapest_total_path and related fields */
	/* Restore caller's memory context before returning */
1446 	MemoryContextSwitchTo(oldcontext);
1451 * restriction_is_constant_false --- is a restrictlist just FALSE?
1453 * In cases where a qual is provably constant FALSE, eval_const_expressions
1454 * will generally have thrown away anything that's ANDed with it. In outer
1455 * join situations this will leave us computing cartesian products only to
1456 * decide there's no match for an outer row, which is pretty stupid. So,
1457 * we need to detect the case.
1459 * If only_pushed_down is TRUE, then consider only pushed-down quals.
1462 restriction_is_constant_false(List *restrictlist, bool only_pushed_down)
1467 * Despite the above comment, the restriction list we see here might
1468 * possibly have other members besides the FALSE constant, since other
1469 * quals could get "pushed down" to the outer join level. So we check
1470 * each member of the list.
1472 foreach(lc, restrictlist)
1474 RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc);
1476 if (only_pushed_down && !rinfo->is_pushed_down)
1479 if (rinfo->clause && IsA(rinfo->clause, Const))
1481 Const *con = (Const *) rinfo->clause;
1483 /* constant NULL is as good as constant FALSE for our purposes */
1484 if (con->constisnull)
1486 if (!DatumGetBool(con->constvalue))