OSDN Git Service

Call standrad_ProcessUtility when no hook is set
[pghintplan/pg_hint_plan.git] / core.c
1 /*-------------------------------------------------------------------------
2  *
3  * core.c
4  *        Routines copied from PostgreSQL core distribution.
5  *
6
7  * The main purpose of this files is having access to static functions in core.
8  * Another purpose is tweaking functions behavior by replacing part of them by
9  * macro definitions. See at the end of pg_hint_plan.c for details. Anyway,
10  * this file *must* contain required functions without making any change.
11  *
12  * This file contains the following functions from corresponding files.
13  *
14  * src/backend/optimizer/path/allpaths.c
15  *
16  *      static functions:
17  *     set_plain_rel_pathlist()
18  *     create_plain_partial_paths()
19  *     set_append_rel_pathlist()
20  *     add_paths_to_append_rel()
21  *     generate_mergeappend_paths()
22  *     get_cheapest_parameterized_child_path()
23  *     accumulate_append_subpath()
24  *
25  *  public functions:
26  *     standard_join_search(): This funcion is not static. The reason for
27  *        including this function is make_rels_by_clause_joins. In order to
28  *        avoid generating apparently unwanted join combination, we decided to
29  *        change the behavior of make_join_rel, which is called under this
30  *        function.
31  *
32  * src/backend/optimizer/path/joinrels.c
33  *
34  *      public functions:
35  *     join_search_one_level(): We have to modify this to call my definition of
36  *                  make_rels_by_clause_joins.
37  *
38  *      static functions:
39  *     make_rels_by_clause_joins()
40  *     make_rels_by_clauseless_joins()
41  *     join_is_legal()
42  *     has_join_restriction()
43  *     is_dummy_rel()
44  *     mark_dummy_rel()
45  *     restriction_is_constant_false()
46  *
47  *
48  * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
49  * Portions Copyright (c) 1994, Regents of the University of California
50  *
51  *-------------------------------------------------------------------------
52  */
53
54
55 /*
56  * set_plain_rel_pathlist
57  *        Build access paths for a plain relation (no subquery, no inheritance)
58  */
59 static void
60 set_plain_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
61 {
62         Relids          required_outer;
63
64         /*
65          * We don't support pushing join clauses into the quals of a seqscan, but
66          * it could still have required parameterization due to LATERAL refs in
67          * its tlist.
68          */
69         required_outer = rel->lateral_relids;
70
71         /* Consider sequential scan */
72         add_path(rel, create_seqscan_path(root, rel, required_outer, 0));
73
74         /* If appropriate, consider parallel sequential scan */
75         if (rel->consider_parallel && required_outer == NULL)
76                 create_plain_partial_paths(root, rel);
77
78         /* Consider index scans */
79         create_index_paths(root, rel);
80
81         /* Consider TID scans */
82         create_tidscan_paths(root, rel);
83 }
84
85
86 /*
87  * create_plain_partial_paths
88  *        Build partial access paths for parallel scan of a plain relation
89  */
90 static void
91 create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel)
92 {
93         int                     parallel_workers;
94
95         parallel_workers = compute_parallel_worker(rel, rel->pages, -1);
96
97         /* If any limit was set to zero, the user doesn't want a parallel scan. */
98         if (parallel_workers <= 0)
99                 return;
100
101         /* Add an unordered partial path based on a parallel sequential scan. */
102         add_partial_path(rel, create_seqscan_path(root, rel, NULL, parallel_workers));
103 }
104
105
106 /*
107  * set_append_rel_pathlist
108  *        Build access paths for an "append relation"
109  */
110 static void
111 set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
112                                                 Index rti, RangeTblEntry *rte)
113 {
114         int                     parentRTindex = rti;
115         List       *live_childrels = NIL;
116         ListCell   *l;
117
118         /*
119          * Generate access paths for each member relation, and remember the
120          * non-dummy children.
121          */
122         foreach(l, root->append_rel_list)
123         {
124                 AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(l);
125                 int                     childRTindex;
126                 RangeTblEntry *childRTE;
127                 RelOptInfo *childrel;
128
129                 /* append_rel_list contains all append rels; ignore others */
130                 if (appinfo->parent_relid != parentRTindex)
131                         continue;
132
133                 /* Re-locate the child RTE and RelOptInfo */
134                 childRTindex = appinfo->child_relid;
135                 childRTE = root->simple_rte_array[childRTindex];
136                 childrel = root->simple_rel_array[childRTindex];
137
138                 /*
139                  * If set_append_rel_size() decided the parent appendrel was
140                  * parallel-unsafe at some point after visiting this child rel, we
141                  * need to propagate the unsafety marking down to the child, so that
142                  * we don't generate useless partial paths for it.
143                  */
144                 if (!rel->consider_parallel)
145                         childrel->consider_parallel = false;
146
147                 /*
148                  * Compute the child's access paths.
149                  */
150                 set_rel_pathlist(root, childrel, childRTindex, childRTE);
151
152                 /*
153                  * If child is dummy, ignore it.
154                  */
155                 if (IS_DUMMY_REL(childrel))
156                         continue;
157
158                 /*
159                  * Child is live, so add it to the live_childrels list for use below.
160                  */
161                 live_childrels = lappend(live_childrels, childrel);
162         }
163
164         /* Add paths to the "append" relation. */
165         add_paths_to_append_rel(root, rel, live_childrels);
166 }
167
168 /*
169  * add_paths_to_append_rel
170  *              Generate paths for given "append" relation given the set of non-dummy
171  *              child rels.
172  *
173  * The function collects all parameterizations and orderings supported by the
174  * non-dummy children. For every such parameterization or ordering, it creates
175  * an append path collecting one path from each non-dummy child with given
176  * parameterization or ordering. Similarly it collects partial paths from
177  * non-dummy children to create partial append paths.
178  */
179 static void
180 add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel,
181                                                 List *live_childrels)
182 {
183         List       *subpaths = NIL;
184         bool            subpaths_valid = true;
185         List       *partial_subpaths = NIL;
186         bool            partial_subpaths_valid = true;
187         List       *all_child_pathkeys = NIL;
188         List       *all_child_outers = NIL;
189         ListCell   *l;
190         List       *partitioned_rels = NIL;
191         RangeTblEntry *rte;
192         bool            build_partitioned_rels = false;
193
194         /*
195          * A plain relation will already have a PartitionedChildRelInfo if it is
196          * partitioned.  For a subquery RTE, no PartitionedChildRelInfo exists; we
197          * collect all partitioned_rels associated with any child.  (This assumes
198          * that we don't need to look through multiple levels of subquery RTEs; if
199          * we ever do, we could create a PartitionedChildRelInfo with the
200          * accumulated list of partitioned_rels which would then be found when
201          * populated our parent rel with paths.  For the present, that appears to
202          * be unnecessary.)
203          */
204         rte = planner_rt_fetch(rel->relid, root);
205         switch (rte->rtekind)
206         {
207                 case RTE_RELATION:
208                         if (rte->relkind == RELKIND_PARTITIONED_TABLE)
209                         {
210                                 partitioned_rels =
211                                         get_partitioned_child_rels(root, rel->relid);
212                                 Assert(list_length(partitioned_rels) >= 1);
213                         }
214                         break;
215                 case RTE_SUBQUERY:
216                         build_partitioned_rels = true;
217                         break;
218                 default:
219                         elog(ERROR, "unexpected rtekind: %d", (int) rte->rtekind);
220         }
221
222         /*
223          * For every non-dummy child, remember the cheapest path.  Also, identify
224          * all pathkeys (orderings) and parameterizations (required_outer sets)
225          * available for the non-dummy member relations.
226          */
227         foreach(l, live_childrels)
228         {
229                 RelOptInfo *childrel = lfirst(l);
230                 ListCell   *lcp;
231
232                 /*
233                  * If we need to build partitioned_rels, accumulate the partitioned
234                  * rels for this child.
235                  */
236                 if (build_partitioned_rels)
237                 {
238                         List       *cprels;
239
240                         cprels = get_partitioned_child_rels(root, childrel->relid);
241                         partitioned_rels = list_concat(partitioned_rels,
242                                                                                    list_copy(cprels));
243                 }
244
245                 /*
246                  * If child has an unparameterized cheapest-total path, add that to
247                  * the unparameterized Append path we are constructing for the parent.
248                  * If not, there's no workable unparameterized path.
249                  */
250                 if (childrel->cheapest_total_path->param_info == NULL)
251                         subpaths = accumulate_append_subpath(subpaths,
252                                                                                                  childrel->cheapest_total_path);
253                 else
254                         subpaths_valid = false;
255
256                 /* Same idea, but for a partial plan. */
257                 if (childrel->partial_pathlist != NIL)
258                         partial_subpaths = accumulate_append_subpath(partial_subpaths,
259                                                                                                                  linitial(childrel->partial_pathlist));
260                 else
261                         partial_subpaths_valid = false;
262
263                 /*
264                  * Collect lists of all the available path orderings and
265                  * parameterizations for all the children.  We use these as a
266                  * heuristic to indicate which sort orderings and parameterizations we
267                  * should build Append and MergeAppend paths for.
268                  */
269                 foreach(lcp, childrel->pathlist)
270                 {
271                         Path       *childpath = (Path *) lfirst(lcp);
272                         List       *childkeys = childpath->pathkeys;
273                         Relids          childouter = PATH_REQ_OUTER(childpath);
274
275                         /* Unsorted paths don't contribute to pathkey list */
276                         if (childkeys != NIL)
277                         {
278                                 ListCell   *lpk;
279                                 bool            found = false;
280
281                                 /* Have we already seen this ordering? */
282                                 foreach(lpk, all_child_pathkeys)
283                                 {
284                                         List       *existing_pathkeys = (List *) lfirst(lpk);
285
286                                         if (compare_pathkeys(existing_pathkeys,
287                                                                                  childkeys) == PATHKEYS_EQUAL)
288                                         {
289                                                 found = true;
290                                                 break;
291                                         }
292                                 }
293                                 if (!found)
294                                 {
295                                         /* No, so add it to all_child_pathkeys */
296                                         all_child_pathkeys = lappend(all_child_pathkeys,
297                                                                                                  childkeys);
298                                 }
299                         }
300
301                         /* Unparameterized paths don't contribute to param-set list */
302                         if (childouter)
303                         {
304                                 ListCell   *lco;
305                                 bool            found = false;
306
307                                 /* Have we already seen this param set? */
308                                 foreach(lco, all_child_outers)
309                                 {
310                                         Relids          existing_outers = (Relids) lfirst(lco);
311
312                                         if (bms_equal(existing_outers, childouter))
313                                         {
314                                                 found = true;
315                                                 break;
316                                         }
317                                 }
318                                 if (!found)
319                                 {
320                                         /* No, so add it to all_child_outers */
321                                         all_child_outers = lappend(all_child_outers,
322                                                                                            childouter);
323                                 }
324                         }
325                 }
326         }
327
328         /*
329          * If we found unparameterized paths for all children, build an unordered,
330          * unparameterized Append path for the rel.  (Note: this is correct even
331          * if we have zero or one live subpath due to constraint exclusion.)
332          */
333         if (subpaths_valid)
334                 add_path(rel, (Path *) create_append_path(rel, subpaths, NULL, 0,
335                                                                                                   partitioned_rels));
336
337         /*
338          * Consider an append of partial unordered, unparameterized partial paths.
339          */
340         if (partial_subpaths_valid)
341         {
342                 AppendPath *appendpath;
343                 ListCell   *lc;
344                 int                     parallel_workers = 0;
345
346                 /*
347                  * Decide on the number of workers to request for this append path.
348                  * For now, we just use the maximum value from among the members.  It
349                  * might be useful to use a higher number if the Append node were
350                  * smart enough to spread out the workers, but it currently isn't.
351                  */
352                 foreach(lc, partial_subpaths)
353                 {
354                         Path       *path = lfirst(lc);
355
356                         parallel_workers = Max(parallel_workers, path->parallel_workers);
357                 }
358                 Assert(parallel_workers > 0);
359
360                 /* Generate a partial append path. */
361                 appendpath = create_append_path(rel, partial_subpaths, NULL,
362                                                                                 parallel_workers, partitioned_rels);
363                 add_partial_path(rel, (Path *) appendpath);
364         }
365
366         /*
367          * Also build unparameterized MergeAppend paths based on the collected
368          * list of child pathkeys.
369          */
370         if (subpaths_valid)
371                 generate_mergeappend_paths(root, rel, live_childrels,
372                                                                    all_child_pathkeys,
373                                                                    partitioned_rels);
374
375         /*
376          * Build Append paths for each parameterization seen among the child rels.
377          * (This may look pretty expensive, but in most cases of practical
378          * interest, the child rels will expose mostly the same parameterizations,
379          * so that not that many cases actually get considered here.)
380          *
381          * The Append node itself cannot enforce quals, so all qual checking must
382          * be done in the child paths.  This means that to have a parameterized
383          * Append path, we must have the exact same parameterization for each
384          * child path; otherwise some children might be failing to check the
385          * moved-down quals.  To make them match up, we can try to increase the
386          * parameterization of lesser-parameterized paths.
387          */
388         foreach(l, all_child_outers)
389         {
390                 Relids          required_outer = (Relids) lfirst(l);
391                 ListCell   *lcr;
392
393                 /* Select the child paths for an Append with this parameterization */
394                 subpaths = NIL;
395                 subpaths_valid = true;
396                 foreach(lcr, live_childrels)
397                 {
398                         RelOptInfo *childrel = (RelOptInfo *) lfirst(lcr);
399                         Path       *subpath;
400
401                         subpath = get_cheapest_parameterized_child_path(root,
402                                                                                                                         childrel,
403                                                                                                                         required_outer);
404                         if (subpath == NULL)
405                         {
406                                 /* failed to make a suitable path for this child */
407                                 subpaths_valid = false;
408                                 break;
409                         }
410                         subpaths = accumulate_append_subpath(subpaths, subpath);
411                 }
412
413                 if (subpaths_valid)
414                         add_path(rel, (Path *)
415                                          create_append_path(rel, subpaths, required_outer, 0,
416                                                                                 partitioned_rels));
417         }
418 }
419
420
421 /*
422  * generate_mergeappend_paths
423  *              Generate MergeAppend paths for an append relation
424  *
425  * Generate a path for each ordering (pathkey list) appearing in
426  * all_child_pathkeys.
427  *
428  * We consider both cheapest-startup and cheapest-total cases, ie, for each
429  * interesting ordering, collect all the cheapest startup subpaths and all the
430  * cheapest total paths, and build a MergeAppend path for each case.
431  *
432  * We don't currently generate any parameterized MergeAppend paths.  While
433  * it would not take much more code here to do so, it's very unclear that it
434  * is worth the planning cycles to investigate such paths: there's little
435  * use for an ordered path on the inside of a nestloop.  In fact, it's likely
436  * that the current coding of add_path would reject such paths out of hand,
437  * because add_path gives no credit for sort ordering of parameterized paths,
438  * and a parameterized MergeAppend is going to be more expensive than the
439  * corresponding parameterized Append path.  If we ever try harder to support
440  * parameterized mergejoin plans, it might be worth adding support for
441  * parameterized MergeAppends to feed such joins.  (See notes in
442  * optimizer/README for why that might not ever happen, though.)
443  */
444 static void
445 generate_mergeappend_paths(PlannerInfo *root, RelOptInfo *rel,
446                                                    List *live_childrels,
447                                                    List *all_child_pathkeys,
448                                                    List *partitioned_rels)
449 {
450         ListCell   *lcp;
451
452         foreach(lcp, all_child_pathkeys)
453         {
454                 List       *pathkeys = (List *) lfirst(lcp);
455                 List       *startup_subpaths = NIL;
456                 List       *total_subpaths = NIL;
457                 bool            startup_neq_total = false;
458                 ListCell   *lcr;
459
460                 /* Select the child paths for this ordering... */
461                 foreach(lcr, live_childrels)
462                 {
463                         RelOptInfo *childrel = (RelOptInfo *) lfirst(lcr);
464                         Path       *cheapest_startup,
465                                            *cheapest_total;
466
467                         /* Locate the right paths, if they are available. */
468                         cheapest_startup =
469                                 get_cheapest_path_for_pathkeys(childrel->pathlist,
470                                                                                            pathkeys,
471                                                                                            NULL,
472                                                                                            STARTUP_COST,
473                                                                                            false);
474                         cheapest_total =
475                                 get_cheapest_path_for_pathkeys(childrel->pathlist,
476                                                                                            pathkeys,
477                                                                                            NULL,
478                                                                                            TOTAL_COST,
479                                                                                            false);
480
481                         /*
482                          * If we can't find any paths with the right order just use the
483                          * cheapest-total path; we'll have to sort it later.
484                          */
485                         if (cheapest_startup == NULL || cheapest_total == NULL)
486                         {
487                                 cheapest_startup = cheapest_total =
488                                         childrel->cheapest_total_path;
489                                 /* Assert we do have an unparameterized path for this child */
490                                 Assert(cheapest_total->param_info == NULL);
491                         }
492
493                         /*
494                          * Notice whether we actually have different paths for the
495                          * "cheapest" and "total" cases; frequently there will be no point
496                          * in two create_merge_append_path() calls.
497                          */
498                         if (cheapest_startup != cheapest_total)
499                                 startup_neq_total = true;
500
501                         startup_subpaths =
502                                 accumulate_append_subpath(startup_subpaths, cheapest_startup);
503                         total_subpaths =
504                                 accumulate_append_subpath(total_subpaths, cheapest_total);
505                 }
506
507                 /* ... and build the MergeAppend paths */
508                 add_path(rel, (Path *) create_merge_append_path(root,
509                                                                                                                 rel,
510                                                                                                                 startup_subpaths,
511                                                                                                                 pathkeys,
512                                                                                                                 NULL,
513                                                                                                                 partitioned_rels));
514                 if (startup_neq_total)
515                         add_path(rel, (Path *) create_merge_append_path(root,
516                                                                                                                         rel,
517                                                                                                                         total_subpaths,
518                                                                                                                         pathkeys,
519                                                                                                                         NULL,
520                                                                                                                         partitioned_rels));
521         }
522 }
523
524
525 /*
526  * get_cheapest_parameterized_child_path
527  *              Get cheapest path for this relation that has exactly the requested
528  *              parameterization.
529  *
530  * Returns NULL if unable to create such a path.
531  */
532 static Path *
533 get_cheapest_parameterized_child_path(PlannerInfo *root, RelOptInfo *rel,
534                                                                           Relids required_outer)
535 {
536         Path       *cheapest;
537         ListCell   *lc;
538
539         /*
540          * Look up the cheapest existing path with no more than the needed
541          * parameterization.  If it has exactly the needed parameterization, we're
542          * done.
543          */
544         cheapest = get_cheapest_path_for_pathkeys(rel->pathlist,
545                                                                                           NIL,
546                                                                                           required_outer,
547                                                                                           TOTAL_COST,
548                                                                                           false);
549         Assert(cheapest != NULL);
550         if (bms_equal(PATH_REQ_OUTER(cheapest), required_outer))
551                 return cheapest;
552
553         /*
554          * Otherwise, we can "reparameterize" an existing path to match the given
555          * parameterization, which effectively means pushing down additional
556          * joinquals to be checked within the path's scan.  However, some existing
557          * paths might check the available joinquals already while others don't;
558          * therefore, it's not clear which existing path will be cheapest after
559          * reparameterization.  We have to go through them all and find out.
560          */
561         cheapest = NULL;
562         foreach(lc, rel->pathlist)
563         {
564                 Path       *path = (Path *) lfirst(lc);
565
566                 /* Can't use it if it needs more than requested parameterization */
567                 if (!bms_is_subset(PATH_REQ_OUTER(path), required_outer))
568                         continue;
569
570                 /*
571                  * Reparameterization can only increase the path's cost, so if it's
572                  * already more expensive than the current cheapest, forget it.
573                  */
574                 if (cheapest != NULL &&
575                         compare_path_costs(cheapest, path, TOTAL_COST) <= 0)
576                         continue;
577
578                 /* Reparameterize if needed, then recheck cost */
579                 if (!bms_equal(PATH_REQ_OUTER(path), required_outer))
580                 {
581                         path = reparameterize_path(root, path, required_outer, 1.0);
582                         if (path == NULL)
583                                 continue;               /* failed to reparameterize this one */
584                         Assert(bms_equal(PATH_REQ_OUTER(path), required_outer));
585
586                         if (cheapest != NULL &&
587                                 compare_path_costs(cheapest, path, TOTAL_COST) <= 0)
588                                 continue;
589                 }
590
591                 /* We have a new best path */
592                 cheapest = path;
593         }
594
595         /* Return the best path, or NULL if we found no suitable candidate */
596         return cheapest;
597 }
598
599
600 /*
601  * accumulate_append_subpath
602  *              Add a subpath to the list being built for an Append or MergeAppend
603  *
604  * It's possible that the child is itself an Append or MergeAppend path, in
605  * which case we can "cut out the middleman" and just add its child paths to
606  * our own list.  (We don't try to do this earlier because we need to apply
607  * both levels of transformation to the quals.)
608  *
609  * Note that if we omit a child MergeAppend in this way, we are effectively
610  * omitting a sort step, which seems fine: if the parent is to be an Append,
611  * its result would be unsorted anyway, while if the parent is to be a
612  * MergeAppend, there's no point in a separate sort on a child.
613  */
614 static List *
615 accumulate_append_subpath(List *subpaths, Path *path)
616 {
617         if (IsA(path, AppendPath))
618         {
619                 AppendPath *apath = (AppendPath *) path;
620
621                 /* list_copy is important here to avoid sharing list substructure */
622                 return list_concat(subpaths, list_copy(apath->subpaths));
623         }
624         else if (IsA(path, MergeAppendPath))
625         {
626                 MergeAppendPath *mpath = (MergeAppendPath *) path;
627
628                 /* list_copy is important here to avoid sharing list substructure */
629                 return list_concat(subpaths, list_copy(mpath->subpaths));
630         }
631         else
632                 return lappend(subpaths, path);
633 }
634
635
636 /*
637  * standard_join_search
638  *        Find possible joinpaths for a query by successively finding ways
639  *        to join component relations into join relations.
640  *
641  * 'levels_needed' is the number of iterations needed, ie, the number of
642  *              independent jointree items in the query.  This is > 1.
643  *
644  * 'initial_rels' is a list of RelOptInfo nodes for each independent
645  *              jointree item.  These are the components to be joined together.
646  *              Note that levels_needed == list_length(initial_rels).
647  *
648  * Returns the final level of join relations, i.e., the relation that is
649  * the result of joining all the original relations together.
650  * At least one implementation path must be provided for this relation and
651  * all required sub-relations.
652  *
653  * To support loadable plugins that modify planner behavior by changing the
654  * join searching algorithm, we provide a hook variable that lets a plugin
655  * replace or supplement this function.  Any such hook must return the same
656  * final join relation as the standard code would, but it might have a
657  * different set of implementation paths attached, and only the sub-joinrels
658  * needed for these paths need have been instantiated.
659  *
660  * Note to plugin authors: the functions invoked during standard_join_search()
661  * modify root->join_rel_list and root->join_rel_hash.  If you want to do more
662  * than one join-order search, you'll probably need to save and restore the
663  * original states of those data structures.  See geqo_eval() for an example.
664  */
665 RelOptInfo *
666 standard_join_search(PlannerInfo *root, int levels_needed, List *initial_rels)
667 {
668         int                     lev;
669         RelOptInfo *rel;
670
671         /*
672          * This function cannot be invoked recursively within any one planning
673          * problem, so join_rel_level[] can't be in use already.
674          */
675         Assert(root->join_rel_level == NULL);
676
677         /*
678          * We employ a simple "dynamic programming" algorithm: we first find all
679          * ways to build joins of two jointree items, then all ways to build joins
680          * of three items (from two-item joins and single items), then four-item
681          * joins, and so on until we have considered all ways to join all the
682          * items into one rel.
683          *
684          * root->join_rel_level[j] is a list of all the j-item rels.  Initially we
685          * set root->join_rel_level[1] to represent all the single-jointree-item
686          * relations.
687          */
688         root->join_rel_level = (List **) palloc0((levels_needed + 1) * sizeof(List *));
689
690         root->join_rel_level[1] = initial_rels;
691
692         for (lev = 2; lev <= levels_needed; lev++)
693         {
694                 ListCell   *lc;
695
696                 /*
697                  * Determine all possible pairs of relations to be joined at this
698                  * level, and build paths for making each one from every available
699                  * pair of lower-level relations.
700                  */
701                 join_search_one_level(root, lev);
702
703                 /*
704                  * Run generate_gather_paths() for each just-processed joinrel.  We
705                  * could not do this earlier because both regular and partial paths
706                  * can get added to a particular joinrel at multiple times within
707                  * join_search_one_level.  After that, we're done creating paths for
708                  * the joinrel, so run set_cheapest().
709                  */
710                 foreach(lc, root->join_rel_level[lev])
711                 {
712                         rel = (RelOptInfo *) lfirst(lc);
713
714                         /* Create GatherPaths for any useful partial paths for rel */
715                         generate_gather_paths(root, rel);
716
717                         /* Find and save the cheapest paths for this rel */
718                         set_cheapest(rel);
719
720 #ifdef OPTIMIZER_DEBUG
721                         debug_print_rel(root, rel);
722 #endif
723                 }
724         }
725
726         /*
727          * We should have a single rel at the final level.
728          */
729         if (root->join_rel_level[levels_needed] == NIL)
730                 elog(ERROR, "failed to build any %d-way joins", levels_needed);
731         Assert(list_length(root->join_rel_level[levels_needed]) == 1);
732
733         rel = (RelOptInfo *) linitial(root->join_rel_level[levels_needed]);
734
735         root->join_rel_level = NULL;
736
737         return rel;
738 }
739
740
741 /*
742  * join_search_one_level
743  *        Consider ways to produce join relations containing exactly 'level'
744  *        jointree items.  (This is one step of the dynamic-programming method
745  *        embodied in standard_join_search.)  Join rel nodes for each feasible
746  *        combination of lower-level rels are created and returned in a list.
747  *        Implementation paths are created for each such joinrel, too.
748  *
749  * level: level of rels we want to make this time
750  * root->join_rel_level[j], 1 <= j < level, is a list of rels containing j items
751  *
752  * The result is returned in root->join_rel_level[level].
753  */
754 void
755 join_search_one_level(PlannerInfo *root, int level)
756 {
757         List      **joinrels = root->join_rel_level;
758         ListCell   *r;
759         int                     k;
760
761         Assert(joinrels[level] == NIL);
762
763         /* Set join_cur_level so that new joinrels are added to proper list */
764         root->join_cur_level = level;
765
766         /*
767          * First, consider left-sided and right-sided plans, in which rels of
768          * exactly level-1 member relations are joined against initial relations.
769          * We prefer to join using join clauses, but if we find a rel of level-1
770          * members that has no join clauses, we will generate Cartesian-product
771          * joins against all initial rels not already contained in it.
772          */
773         foreach(r, joinrels[level - 1])
774         {
775                 RelOptInfo *old_rel = (RelOptInfo *) lfirst(r);
776
777                 if (old_rel->joininfo != NIL || old_rel->has_eclass_joins ||
778                         has_join_restriction(root, old_rel))
779                 {
780                         /*
781                          * There are join clauses or join order restrictions relevant to
782                          * this rel, so consider joins between this rel and (only) those
783                          * initial rels it is linked to by a clause or restriction.
784                          *
785                          * At level 2 this condition is symmetric, so there is no need to
786                          * look at initial rels before this one in the list; we already
787                          * considered such joins when we were at the earlier rel.  (The
788                          * mirror-image joins are handled automatically by make_join_rel.)
789                          * In later passes (level > 2), we join rels of the previous level
790                          * to each initial rel they don't already include but have a join
791                          * clause or restriction with.
792                          */
793                         ListCell   *other_rels;
794
795                         if (level == 2)         /* consider remaining initial rels */
796                                 other_rels = lnext(r);
797                         else                            /* consider all initial rels */
798                                 other_rels = list_head(joinrels[1]);
799
800                         make_rels_by_clause_joins(root,
801                                                                           old_rel,
802                                                                           other_rels);
803                 }
804                 else
805                 {
806                         /*
807                          * Oops, we have a relation that is not joined to any other
808                          * relation, either directly or by join-order restrictions.
809                          * Cartesian product time.
810                          *
811                          * We consider a cartesian product with each not-already-included
812                          * initial rel, whether it has other join clauses or not.  At
813                          * level 2, if there are two or more clauseless initial rels, we
814                          * will redundantly consider joining them in both directions; but
815                          * such cases aren't common enough to justify adding complexity to
816                          * avoid the duplicated effort.
817                          */
818                         make_rels_by_clauseless_joins(root,
819                                                                                   old_rel,
820                                                                                   list_head(joinrels[1]));
821                 }
822         }
823
824         /*
825          * Now, consider "bushy plans" in which relations of k initial rels are
826          * joined to relations of level-k initial rels, for 2 <= k <= level-2.
827          *
828          * We only consider bushy-plan joins for pairs of rels where there is a
829          * suitable join clause (or join order restriction), in order to avoid
830          * unreasonable growth of planning time.
831          */
832         for (k = 2;; k++)
833         {
834                 int                     other_level = level - k;
835
836                 /*
837                  * Since make_join_rel(x, y) handles both x,y and y,x cases, we only
838                  * need to go as far as the halfway point.
839                  */
840                 if (k > other_level)
841                         break;
842
843                 foreach(r, joinrels[k])
844                 {
845                         RelOptInfo *old_rel = (RelOptInfo *) lfirst(r);
846                         ListCell   *other_rels;
847                         ListCell   *r2;
848
849                         /*
850                          * We can ignore relations without join clauses here, unless they
851                          * participate in join-order restrictions --- then we might have
852                          * to force a bushy join plan.
853                          */
854                         if (old_rel->joininfo == NIL && !old_rel->has_eclass_joins &&
855                                 !has_join_restriction(root, old_rel))
856                                 continue;
857
858                         if (k == other_level)
859                                 other_rels = lnext(r);  /* only consider remaining rels */
860                         else
861                                 other_rels = list_head(joinrels[other_level]);
862
863                         for_each_cell(r2, other_rels)
864                         {
865                                 RelOptInfo *new_rel = (RelOptInfo *) lfirst(r2);
866
867                                 if (!bms_overlap(old_rel->relids, new_rel->relids))
868                                 {
869                                         /*
870                                          * OK, we can build a rel of the right level from this
871                                          * pair of rels.  Do so if there is at least one relevant
872                                          * join clause or join order restriction.
873                                          */
874                                         if (have_relevant_joinclause(root, old_rel, new_rel) ||
875                                                 have_join_order_restriction(root, old_rel, new_rel))
876                                         {
877                                                 (void) make_join_rel(root, old_rel, new_rel);
878                                         }
879                                 }
880                         }
881                 }
882         }
883
884         /*----------
885          * Last-ditch effort: if we failed to find any usable joins so far, force
886          * a set of cartesian-product joins to be generated.  This handles the
887          * special case where all the available rels have join clauses but we
888          * cannot use any of those clauses yet.  This can only happen when we are
889          * considering a join sub-problem (a sub-joinlist) and all the rels in the
890          * sub-problem have only join clauses with rels outside the sub-problem.
891          * An example is
892          *
893          *              SELECT ... FROM a INNER JOIN b ON TRUE, c, d, ...
894          *              WHERE a.w = c.x and b.y = d.z;
895          *
896          * If the "a INNER JOIN b" sub-problem does not get flattened into the
897          * upper level, we must be willing to make a cartesian join of a and b;
898          * but the code above will not have done so, because it thought that both
899          * a and b have joinclauses.  We consider only left-sided and right-sided
900          * cartesian joins in this case (no bushy).
901          *----------
902          */
903         if (joinrels[level] == NIL)
904         {
905                 /*
906                  * This loop is just like the first one, except we always call
907                  * make_rels_by_clauseless_joins().
908                  */
909                 foreach(r, joinrels[level - 1])
910                 {
911                         RelOptInfo *old_rel = (RelOptInfo *) lfirst(r);
912
913                         make_rels_by_clauseless_joins(root,
914                                                                                   old_rel,
915                                                                                   list_head(joinrels[1]));
916                 }
917
918                 /*----------
919                  * When special joins are involved, there may be no legal way
920                  * to make an N-way join for some values of N.  For example consider
921                  *
922                  * SELECT ... FROM t1 WHERE
923                  *       x IN (SELECT ... FROM t2,t3 WHERE ...) AND
924                  *       y IN (SELECT ... FROM t4,t5 WHERE ...)
925                  *
926                  * We will flatten this query to a 5-way join problem, but there are
927                  * no 4-way joins that join_is_legal() will consider legal.  We have
928                  * to accept failure at level 4 and go on to discover a workable
929                  * bushy plan at level 5.
930                  *
931                  * However, if there are no special joins and no lateral references
932                  * then join_is_legal() should never fail, and so the following sanity
933                  * check is useful.
934                  *----------
935                  */
936                 if (joinrels[level] == NIL &&
937                         root->join_info_list == NIL &&
938                         !root->hasLateralRTEs)
939                         elog(ERROR, "failed to build any %d-way joins", level);
940         }
941 }
942
943
944 /*
945  * make_rels_by_clause_joins
946  *        Build joins between the given relation 'old_rel' and other relations
947  *        that participate in join clauses that 'old_rel' also participates in
948  *        (or participate in join-order restrictions with it).
949  *        The join rels are returned in root->join_rel_level[join_cur_level].
950  *
951  * Note: at levels above 2 we will generate the same joined relation in
952  * multiple ways --- for example (a join b) join c is the same RelOptInfo as
953  * (b join c) join a, though the second case will add a different set of Paths
954  * to it.  This is the reason for using the join_rel_level mechanism, which
955  * automatically ensures that each new joinrel is only added to the list once.
956  *
957  * 'old_rel' is the relation entry for the relation to be joined
958  * 'other_rels': the first cell in a linked list containing the other
959  * rels to be considered for joining
960  *
961  * Currently, this is only used with initial rels in other_rels, but it
962  * will work for joining to joinrels too.
963  */
964 static void
965 make_rels_by_clause_joins(PlannerInfo *root,
966                                                   RelOptInfo *old_rel,
967                                                   ListCell *other_rels)
968 {
969         ListCell   *l;
970
971         for_each_cell(l, other_rels)
972         {
973                 RelOptInfo *other_rel = (RelOptInfo *) lfirst(l);
974
975                 if (!bms_overlap(old_rel->relids, other_rel->relids) &&
976                         (have_relevant_joinclause(root, old_rel, other_rel) ||
977                          have_join_order_restriction(root, old_rel, other_rel)))
978                 {
979                         (void) make_join_rel(root, old_rel, other_rel);
980                 }
981         }
982 }
983
984
985 /*
986  * make_rels_by_clauseless_joins
987  *        Given a relation 'old_rel' and a list of other relations
988  *        'other_rels', create a join relation between 'old_rel' and each
989  *        member of 'other_rels' that isn't already included in 'old_rel'.
990  *        The join rels are returned in root->join_rel_level[join_cur_level].
991  *
992  * 'old_rel' is the relation entry for the relation to be joined
993  * 'other_rels': the first cell of a linked list containing the
994  * other rels to be considered for joining
995  *
996  * Currently, this is only used with initial rels in other_rels, but it would
997  * work for joining to joinrels too.
998  */
999 static void
1000 make_rels_by_clauseless_joins(PlannerInfo *root,
1001                                                           RelOptInfo *old_rel,
1002                                                           ListCell *other_rels)
1003 {
1004         ListCell   *l;
1005
1006         for_each_cell(l, other_rels)
1007         {
1008                 RelOptInfo *other_rel = (RelOptInfo *) lfirst(l);
1009
1010                 if (!bms_overlap(other_rel->relids, old_rel->relids))
1011                 {
1012                         (void) make_join_rel(root, old_rel, other_rel);
1013                 }
1014         }
1015 }
1016
1017
1018 /*
1019  * join_is_legal
1020  *         Determine whether a proposed join is legal given the query's
1021  *         join order constraints; and if it is, determine the join type.
1022  *
1023  * Caller must supply not only the two rels, but the union of their relids.
1024  * (We could simplify the API by computing joinrelids locally, but this
1025  * would be redundant work in the normal path through make_join_rel.)
1026  *
1027  * On success, *sjinfo_p is set to NULL if this is to be a plain inner join,
1028  * else it's set to point to the associated SpecialJoinInfo node.  Also,
1029  * *reversed_p is set TRUE if the given relations need to be swapped to
1030  * match the SpecialJoinInfo node.
1031  */
1032 static bool
1033 join_is_legal(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2,
1034                           Relids joinrelids,
1035                           SpecialJoinInfo **sjinfo_p, bool *reversed_p)
1036 {
1037         SpecialJoinInfo *match_sjinfo;
1038         bool            reversed;
1039         bool            unique_ified;
1040         bool            must_be_leftjoin;
1041         ListCell   *l;
1042
1043         /*
1044          * Ensure output params are set on failure return.  This is just to
1045          * suppress uninitialized-variable warnings from overly anal compilers.
1046          */
1047         *sjinfo_p = NULL;
1048         *reversed_p = false;
1049
1050         /*
1051          * If we have any special joins, the proposed join might be illegal; and
1052          * in any case we have to determine its join type.  Scan the join info
1053          * list for matches and conflicts.
1054          */
1055         match_sjinfo = NULL;
1056         reversed = false;
1057         unique_ified = false;
1058         must_be_leftjoin = false;
1059
1060         foreach(l, root->join_info_list)
1061         {
1062                 SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) lfirst(l);
1063
1064                 /*
1065                  * This special join is not relevant unless its RHS overlaps the
1066                  * proposed join.  (Check this first as a fast path for dismissing
1067                  * most irrelevant SJs quickly.)
1068                  */
1069                 if (!bms_overlap(sjinfo->min_righthand, joinrelids))
1070                         continue;
1071
1072                 /*
1073                  * Also, not relevant if proposed join is fully contained within RHS
1074                  * (ie, we're still building up the RHS).
1075                  */
1076                 if (bms_is_subset(joinrelids, sjinfo->min_righthand))
1077                         continue;
1078
1079                 /*
1080                  * Also, not relevant if SJ is already done within either input.
1081                  */
1082                 if (bms_is_subset(sjinfo->min_lefthand, rel1->relids) &&
1083                         bms_is_subset(sjinfo->min_righthand, rel1->relids))
1084                         continue;
1085                 if (bms_is_subset(sjinfo->min_lefthand, rel2->relids) &&
1086                         bms_is_subset(sjinfo->min_righthand, rel2->relids))
1087                         continue;
1088
1089                 /*
1090                  * If it's a semijoin and we already joined the RHS to any other rels
1091                  * within either input, then we must have unique-ified the RHS at that
1092                  * point (see below).  Therefore the semijoin is no longer relevant in
1093                  * this join path.
1094                  */
        if (sjinfo->jointype == JOIN_SEMI)
        {
            if (bms_is_subset(sjinfo->syn_righthand, rel1->relids) &&
                !bms_equal(sjinfo->syn_righthand, rel1->relids))
                continue;
            if (bms_is_subset(sjinfo->syn_righthand, rel2->relids) &&
                !bms_equal(sjinfo->syn_righthand, rel2->relids))
                continue;
        }

        /*
         * If one input contains min_lefthand and the other contains
         * min_righthand, then we can perform the SJ at this join.
         *
         * Reject if we get matches to more than one SJ; that implies we're
         * considering something that's not really valid.
         */
        if (bms_is_subset(sjinfo->min_lefthand, rel1->relids) &&
            bms_is_subset(sjinfo->min_righthand, rel2->relids))
        {
            if (match_sjinfo)
                return false;    /* invalid join path */
            match_sjinfo = sjinfo;
            reversed = false;
        }
        else if (bms_is_subset(sjinfo->min_lefthand, rel2->relids) &&
                 bms_is_subset(sjinfo->min_righthand, rel1->relids))
        {
            if (match_sjinfo)
                return false;    /* invalid join path */
            match_sjinfo = sjinfo;
            reversed = true;
        }
        else if (sjinfo->jointype == JOIN_SEMI &&
                 bms_equal(sjinfo->syn_righthand, rel2->relids) &&
                 create_unique_path(root, rel2, rel2->cheapest_total_path,
                                    sjinfo) != NULL)
        {
            /*----------
             * For a semijoin, we can join the RHS to anything else by
             * unique-ifying the RHS (if the RHS can be unique-ified).
             * We will only get here if we have the full RHS but less
             * than min_lefthand on the LHS.
             *
             * The reason to consider such a join path is exemplified by
             *     SELECT ... FROM a,b WHERE (a.x,b.y) IN (SELECT c1,c2 FROM c)
             * If we insist on doing this as a semijoin we will first have
             * to form the cartesian product of A*B.  But if we unique-ify
             * C then the semijoin becomes a plain innerjoin and we can join
             * in any order, eg C to A and then to B.  When C is much smaller
             * than A and B this can be a huge win.  So we allow C to be
             * joined to just A or just B here, and then make_join_rel has
             * to handle the case properly.
             *
             * Note that actually we'll allow unique-ified C to be joined to
             * some other relation D here, too.  That is legal, if usually not
             * very sane, and this routine is only concerned with legality not
             * with whether the join is good strategy.
             *----------
             */
            if (match_sjinfo)
                return false;    /* invalid join path */
            match_sjinfo = sjinfo;
            reversed = false;
            unique_ified = true;
        }
        else if (sjinfo->jointype == JOIN_SEMI &&
                 bms_equal(sjinfo->syn_righthand, rel1->relids) &&
                 create_unique_path(root, rel1, rel1->cheapest_total_path,
                                    sjinfo) != NULL)
        {
            /* Reversed semijoin case */
            if (match_sjinfo)
                return false;    /* invalid join path */
            match_sjinfo = sjinfo;
            reversed = true;
            unique_ified = true;
        }
        else
        {
            /*
             * Otherwise, the proposed join overlaps the RHS but isn't a valid
             * implementation of this SJ.  But don't panic quite yet: the RHS
             * violation might have occurred previously, in one or both input
             * relations, in which case we must have previously decided that
             * it was OK to commute some other SJ with this one.  If we need
             * to perform this join to finish building up the RHS, rejecting
             * it could lead to not finding any plan at all.  (This can occur
             * because of the heuristics elsewhere in this file that postpone
             * clauseless joins: we might not consider doing a clauseless join
             * within the RHS until after we've performed other, validly
             * commutable SJs with one or both sides of the clauseless join.)
             * This consideration boils down to the rule that if both inputs
             * overlap the RHS, we can allow the join --- they are either
             * fully within the RHS, or represent previously-allowed joins to
             * rels outside it.
             */
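            /*
             * For instance (rel names purely illustrative): with
             * min_righthand = {b, c}, joining rel1 = {a, b} to rel2 = {c, d}
             * overlaps the RHS on both sides.  Each input either lies wholly
             * inside the RHS or was already allowed to cross the RHS boundary
             * when it was formed, so we trust those earlier decisions and let
             * this join through (the test just below).
             */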
            if (bms_overlap(rel1->relids, sjinfo->min_righthand) &&
                bms_overlap(rel2->relids, sjinfo->min_righthand))
                continue;        /* assume valid previous violation of RHS */

            /*
             * The proposed join could still be legal, but only if we're
             * allowed to associate it into the RHS of this SJ.  That means
             * this SJ must be a LEFT join (not SEMI or ANTI, and certainly
             * not FULL) and the proposed join must not overlap the LHS.
             */
            if (sjinfo->jointype != JOIN_LEFT ||
                bms_overlap(joinrelids, sjinfo->min_lefthand))
                return false;    /* invalid join path */

            /*
             * To be valid, the proposed join must be a LEFT join; otherwise
             * it can't associate into this SJ's RHS.  But we may not yet have
             * found the SpecialJoinInfo matching the proposed join, so we
             * can't test that yet.  Remember the requirement for later.
             */
            must_be_leftjoin = true;
        }
    }

    /*
     * Fail if violated any SJ's RHS and didn't match to a LEFT SJ: the
     * proposed join can't associate into an SJ's RHS.
     *
     * Also, fail if the proposed join's predicate isn't strict; we're
     * essentially checking to see if we can apply outer-join identity 3, and
     * that's a requirement.  (This check may be redundant with checks in
     * make_outerjoininfo, but I'm not quite sure, and it's cheap to test.)
     */
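    /*
     * For reference, outer-join identity 3 (see src/backend/optimizer/README)
     * is the associativity rule
     *     (A leftjoin B on (Pab)) leftjoin C on (Pbc)
     *         = A leftjoin (B leftjoin C on (Pbc)) on (Pab)
     * which holds only when Pbc is strict for B.  The proposed join plays the
     * part of "B leftjoin C" here, hence the requirement that it match a LEFT
     * SJ whose qual is strict on its LHS.
     */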
    if (must_be_leftjoin &&
        (match_sjinfo == NULL ||
         match_sjinfo->jointype != JOIN_LEFT ||
         !match_sjinfo->lhs_strict))
        return false;            /* invalid join path */

    /*
     * We also have to check for constraints imposed by LATERAL references.
     */
    if (root->hasLateralRTEs)
    {
        bool        lateral_fwd;
        bool        lateral_rev;
        Relids      join_lateral_rels;

        /*
         * The proposed rels could each contain lateral references to the
         * other, in which case the join is impossible.  If there are lateral
         * references in just one direction, then the join has to be done with
         * a nestloop with the lateral referencer on the inside.  If the join
         * matches an SJ that cannot be implemented by such a nestloop, the
         * join is impossible.
         *
         * Also, if the lateral reference is only indirect, we should reject
         * the join; whatever rel(s) the reference chain goes through must be
         * joined to first.
         *
         * Another case that might keep us from building a valid plan is the
         * implementation restriction described by have_dangerous_phv().
         */
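        /*
         * For example (query shown only as an illustration): in
         *     SELECT ... FROM a, LATERAL (SELECT * FROM b WHERE b.x = a.x) ss
         * the rel for ss holds a lateral reference to a, so joining a to ss
         * must be a nestloop with a on the outside supplying a.x.  If the
         * matched SJ would force the opposite orientation, a unique-ified
         * path, or a FULL join, the join cannot be implemented.
         */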
        lateral_fwd = bms_overlap(rel1->relids, rel2->lateral_relids);
        lateral_rev = bms_overlap(rel2->relids, rel1->lateral_relids);
        if (lateral_fwd && lateral_rev)
            return false;        /* have lateral refs in both directions */
        if (lateral_fwd)
        {
            /* has to be implemented as nestloop with rel1 on left */
            if (match_sjinfo &&
                (reversed ||
                 unique_ified ||
                 match_sjinfo->jointype == JOIN_FULL))
                return false;    /* not implementable as nestloop */
            /* check there is a direct reference from rel2 to rel1 */
            if (!bms_overlap(rel1->relids, rel2->direct_lateral_relids))
                return false;    /* only indirect refs, so reject */
            /* check we won't have a dangerous PHV */
            if (have_dangerous_phv(root, rel1->relids, rel2->lateral_relids))
                return false;    /* might be unable to handle required PHV */
        }
        else if (lateral_rev)
        {
            /* has to be implemented as nestloop with rel2 on left */
            if (match_sjinfo &&
                (!reversed ||
                 unique_ified ||
                 match_sjinfo->jointype == JOIN_FULL))
                return false;    /* not implementable as nestloop */
            /* check there is a direct reference from rel1 to rel2 */
            if (!bms_overlap(rel2->relids, rel1->direct_lateral_relids))
                return false;    /* only indirect refs, so reject */
            /* check we won't have a dangerous PHV */
            if (have_dangerous_phv(root, rel2->relids, rel1->lateral_relids))
                return false;    /* might be unable to handle required PHV */
        }

        /*
         * LATERAL references could also cause problems later on if we accept
         * this join: if the join's minimum parameterization includes any rels
         * that would have to be on the inside of an outer join with this join
         * rel, then it's never going to be possible to build the complete
         * query using this join.  We should reject this join not only because
         * it'll save work, but because if we don't, the clauseless-join
         * heuristics might think that legality of this join means that some
         * other join rel need not be formed, and that could lead to failure
         * to find any plan at all.  We have to consider not only rels that
         * are directly on the inner side of an OJ with the joinrel, but also
         * ones that are indirectly so, so search to find all such rels.
         */
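        /*
         * The do-loop below accumulates, starting from the joinrel itself,
         * every rel that is forced directly or transitively to the RHS side
         * of some special join above this joinrel (FULL joins constrain both
         * directions).  If the join's minimum parameterization overlaps that
         * set, a rel this join would need as a lateral parameter source can
         * never be joined underneath it, so the join is rejected as a dead
         * end.
         */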
        join_lateral_rels = min_join_parameterization(root, joinrelids,
                                                      rel1, rel2);
        if (join_lateral_rels)
        {
            Relids      join_plus_rhs = bms_copy(joinrelids);
            bool        more;

            do
            {
                more = false;
                foreach(l, root->join_info_list)
                {
                    SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) lfirst(l);

                    if (bms_overlap(sjinfo->min_lefthand, join_plus_rhs) &&
                        !bms_is_subset(sjinfo->min_righthand, join_plus_rhs))
                    {
                        join_plus_rhs = bms_add_members(join_plus_rhs,
                                                        sjinfo->min_righthand);
                        more = true;
                    }
                    /* full joins constrain both sides symmetrically */
                    if (sjinfo->jointype == JOIN_FULL &&
                        bms_overlap(sjinfo->min_righthand, join_plus_rhs) &&
                        !bms_is_subset(sjinfo->min_lefthand, join_plus_rhs))
                    {
                        join_plus_rhs = bms_add_members(join_plus_rhs,
                                                        sjinfo->min_lefthand);
                        more = true;
                    }
                }
            } while (more);
            if (bms_overlap(join_plus_rhs, join_lateral_rels))
                return false;    /* will not be able to join to some RHS rel */
        }
    }

    /* Otherwise, it's a valid join */
    *sjinfo_p = match_sjinfo;
    *reversed_p = reversed;
    return true;
}


/*
 * has_join_restriction
 *        Detect whether the specified relation has join-order restrictions,
 *        due to being inside an outer join or an IN (sub-SELECT),
 *        or participating in any LATERAL references or multi-rel PHVs.
 *
 * Essentially, this tests whether have_join_order_restriction() could
 * succeed with this rel and some other one.  It's OK if we sometimes
 * say "true" incorrectly.  (Therefore, we don't bother with the relatively
 * expensive has_legal_joinclause test.)
 */
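/*
 * For example (table names purely illustrative): in
 *     SELECT ... FROM a LEFT JOIN b ON a.x = b.x, c
 * both a and b overlap the LEFT join's SpecialJoinInfo without containing
 * all of it, so this function reports true for each of them, whereas the
 * freestanding c (assuming no LATERAL refs or PlaceHolderVars) gets false.
 */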
static bool
has_join_restriction(PlannerInfo *root, RelOptInfo *rel)
{
    ListCell   *l;

    if (rel->lateral_relids != NULL || rel->lateral_referencers != NULL)
        return true;

    foreach(l, root->placeholder_list)
    {
        PlaceHolderInfo *phinfo = (PlaceHolderInfo *) lfirst(l);

        if (bms_is_subset(rel->relids, phinfo->ph_eval_at) &&
            !bms_equal(rel->relids, phinfo->ph_eval_at))
            return true;
    }

    foreach(l, root->join_info_list)
    {
        SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) lfirst(l);

        /* ignore full joins --- other mechanisms preserve their ordering */
        if (sjinfo->jointype == JOIN_FULL)
            continue;

        /* ignore if SJ is already contained in rel */
        if (bms_is_subset(sjinfo->min_lefthand, rel->relids) &&
            bms_is_subset(sjinfo->min_righthand, rel->relids))
            continue;

        /* restricted if it overlaps LHS or RHS, but doesn't contain SJ */
        if (bms_overlap(sjinfo->min_lefthand, rel->relids) ||
            bms_overlap(sjinfo->min_righthand, rel->relids))
            return true;
    }

    return false;
}


/*
 * is_dummy_rel --- has relation been proven empty?
 */
static bool
is_dummy_rel(RelOptInfo *rel)
{
    return IS_DUMMY_REL(rel);
}


/*
 * Mark a relation as proven empty.
 *
 * During GEQO planning, this can get invoked more than once on the same
 * baserel struct, so it's worth checking to see if the rel is already marked
 * dummy.
 *
 * Also, when called during GEQO join planning, we are in a short-lived
 * memory context.  We must make sure that the dummy path attached to a
 * baserel survives the GEQO cycle, else the baserel is trashed for future
 * GEQO cycles.  On the other hand, when we are marking a joinrel during GEQO,
 * we don't want the dummy path to clutter the main planning context.  Upshot
 * is that the best solution is to explicitly make the dummy path in the same
 * context the given RelOptInfo is in.
 */
static void
mark_dummy_rel(RelOptInfo *rel)
{
    MemoryContext oldcontext;

    /* Already marked? */
    if (is_dummy_rel(rel))
        return;

    /* No, so choose correct context to make the dummy path in */
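    /*
     * GetMemoryChunkContext() resolves the context that the RelOptInfo node
     * itself was allocated in, so the dummy path built below lives exactly
     * as long as the rel does.
     */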
    oldcontext = MemoryContextSwitchTo(GetMemoryChunkContext(rel));

    /* Set dummy size estimate */
    rel->rows = 0;

    /* Evict any previously chosen paths */
    rel->pathlist = NIL;
    rel->partial_pathlist = NIL;

    /* Set up the dummy path */
    add_path(rel, (Path *) create_append_path(rel, NIL, NULL, 0, NIL));

    /* Set or update cheapest_total_path and related fields */
    set_cheapest(rel);

    MemoryContextSwitchTo(oldcontext);
}


/*
 * restriction_is_constant_false --- is a restrictlist just false?
 *
 * In cases where a qual is provably constant false, eval_const_expressions
 * will generally have thrown away anything that's ANDed with it.  In outer
 * join situations this will leave us computing cartesian products only to
 * decide there's no match for an outer row, which is pretty stupid.  So,
 * we need to detect the case.
 *
 * If only_pushed_down is true, then consider only quals that are pushed-down
 * from the point of view of the joinrel.
 */
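/*
 * A typical trigger (query shown only as an illustration):
 *     SELECT ... FROM a LEFT JOIN b ON (a.x = b.x AND 1 = 0)
 * eval_const_expressions() folds the ON clause to a constant FALSE, and
 * detecting that here lets the join be built knowing that no inner row can
 * ever match, instead of scanning for matches that cannot exist.
 */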
static bool
restriction_is_constant_false(List *restrictlist,
                              RelOptInfo *joinrel,
                              bool only_pushed_down)
{
    ListCell   *lc;

    /*
     * Despite the above comment, the restriction list we see here might
     * possibly have other members besides the FALSE constant, since other
     * quals could get "pushed down" to the outer join level.  So we check
     * each member of the list.
     */
    foreach(lc, restrictlist)
    {
        RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc);

        if (only_pushed_down && !RINFO_IS_PUSHED_DOWN(rinfo, joinrel->relids))
            continue;

        if (rinfo->clause && IsA(rinfo->clause, Const))
        {
            Const      *con = (Const *) rinfo->clause;

            /* constant NULL is as good as constant FALSE for our purposes */
            if (con->constisnull)
                return true;
            if (!DatumGetBool(con->constvalue))
                return true;
        }
    }
    return false;
}