/*-------------------------------------------------------------------------
 *
 * core.c
 *        Routines copied from the PostgreSQL core distribution.
 *
 * The main purpose of this file is to provide access to static functions in
 * core.  Another purpose is to tweak the behavior of some functions by
 * replacing parts of them with macro definitions; see the end of
 * pg_hint_plan.c for details.  Apart from that, this file *must* contain the
 * required functions without any changes.
 *
 * This file contains the following functions from the corresponding files.
 *
 * src/backend/optimizer/path/allpaths.c
 *
 *      static functions:
 *     set_plain_rel_pathlist()
 *     create_plain_partial_paths()
 *     set_append_rel_pathlist()
 *     add_paths_to_append_rel()
 *     generate_mergeappend_paths()
 *     get_cheapest_parameterized_child_path()
 *     accumulate_append_subpath()
 *
 *      public functions:
 *     standard_join_search(): This function is not static. The reason for
 *        including this function is make_rels_by_clause_joins. In order to
 *        avoid generating apparently unwanted join combinations, we decided to
 *        change the behavior of make_join_rel, which is called under this
 *        function.
 *
 * src/backend/optimizer/path/joinrels.c
 *
 *      public functions:
 *     join_search_one_level(): We have to modify this to call our definition
 *                  of make_rels_by_clause_joins.
 *
 *      static functions:
 *     make_rels_by_clause_joins()
 *     make_rels_by_clauseless_joins()
 *     join_is_legal()
 *     has_join_restriction()
 *     mark_dummy_rel()
 *     restriction_is_constant_false()
 *
 *
 * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *-------------------------------------------------------------------------
 */
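
/*
 * Illustration (not part of the copied PostgreSQL code): the macro-override
 * technique described above works roughly as sketched below.  pg_hint_plan.c
 * defines macros before including this file so that selected calls made
 * inside the copied functions are redirected to its own wrappers.  The names
 * here are hypothetical; see the end of pg_hint_plan.c for the actual
 * definitions.
 *
 *     #define make_join_rel(root, rel1, rel2) \
 *             pg_hint_plan_make_join_rel((root), (rel1), (rel2))
 *     #include "core.c"
 */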


/*
 * set_plain_rel_pathlist
 *        Build access paths for a plain relation (no subquery, no inheritance)
 */
static void
set_plain_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
{
        Relids          required_outer;

        /*
         * We don't support pushing join clauses into the quals of a seqscan, but
         * it could still have required parameterization due to LATERAL refs in
         * its tlist.
         */
        required_outer = rel->lateral_relids;

        /* Consider sequential scan */
        add_path(rel, create_seqscan_path(root, rel, required_outer, 0));

        /* If appropriate, consider parallel sequential scan */
        if (rel->consider_parallel && required_outer == NULL)
                create_plain_partial_paths(root, rel);

        /* Consider index scans */
        create_index_paths(root, rel);

        /* Consider TID scans */
        create_tidscan_paths(root, rel);
}


/*
 * create_plain_partial_paths
 *        Build partial access paths for parallel scan of a plain relation
 */
static void
create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel)
{
        int                     parallel_workers;

        parallel_workers = compute_parallel_worker(rel, rel->pages, -1);

        /* If any limit was set to zero, the user doesn't want a parallel scan. */
        if (parallel_workers <= 0)
                return;

        /* Add an unordered partial path based on a parallel sequential scan. */
        add_partial_path(rel, create_seqscan_path(root, rel, NULL, parallel_workers));
}


/*
 * set_append_rel_pathlist
 *        Build access paths for an "append relation"
 */
static void
set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
                                                Index rti, RangeTblEntry *rte)
{
        int                     parentRTindex = rti;
        List       *live_childrels = NIL;
        ListCell   *l;

        /*
         * Generate access paths for each member relation, and remember the
         * non-dummy children.
         */
        foreach(l, root->append_rel_list)
        {
                AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(l);
                int                     childRTindex;
                RangeTblEntry *childRTE;
                RelOptInfo *childrel;

                /* append_rel_list contains all append rels; ignore others */
                if (appinfo->parent_relid != parentRTindex)
                        continue;

                /* Re-locate the child RTE and RelOptInfo */
                childRTindex = appinfo->child_relid;
                childRTE = root->simple_rte_array[childRTindex];
                childrel = root->simple_rel_array[childRTindex];

                /*
                 * If set_append_rel_size() decided the parent appendrel was
                 * parallel-unsafe at some point after visiting this child rel, we
                 * need to propagate the unsafety marking down to the child, so that
                 * we don't generate useless partial paths for it.
                 */
                if (!rel->consider_parallel)
                        childrel->consider_parallel = false;

                /*
                 * Compute the child's access paths.
                 */
                set_rel_pathlist(root, childrel, childRTindex, childRTE);

                /*
                 * If child is dummy, ignore it.
                 */
                if (IS_DUMMY_REL(childrel))
                        continue;

                /*
                 * Child is live, so add it to the live_childrels list for use below.
                 */
                live_childrels = lappend(live_childrels, childrel);
        }

        /* Add paths to the "append" relation. */
        add_paths_to_append_rel(root, rel, live_childrels);
}

/*
 * add_paths_to_append_rel
 *              Generate paths for given "append" relation given the set of non-dummy
 *              child rels.
 *
 * The function collects all parameterizations and orderings supported by the
 * non-dummy children. For every such parameterization or ordering, it creates
 * an append path collecting one path from each non-dummy child with given
 * parameterization or ordering. Similarly it collects partial paths from
 * non-dummy children to create partial append paths.
 */
static void
add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel,
                                                List *live_childrels)
{
        List       *subpaths = NIL;
        bool            subpaths_valid = true;
        List       *partial_subpaths = NIL;
        bool            partial_subpaths_valid = true;
        List       *all_child_pathkeys = NIL;
        List       *all_child_outers = NIL;
        ListCell   *l;
        List       *partitioned_rels = NIL;
        RangeTblEntry *rte;
        bool            build_partitioned_rels = false;

        /*
         * A plain relation will already have a PartitionedChildRelInfo if it is
         * partitioned.  For a subquery RTE, no PartitionedChildRelInfo exists; we
         * collect all partitioned_rels associated with any child.  (This assumes
         * that we don't need to look through multiple levels of subquery RTEs; if
         * we ever do, we could create a PartitionedChildRelInfo with the
         * accumulated list of partitioned_rels which would then be found when
         * populated our parent rel with paths.  For the present, that appears to
         * be unnecessary.)
         */
        rte = planner_rt_fetch(rel->relid, root);
        switch (rte->rtekind)
        {
                case RTE_RELATION:
                        if (rte->relkind == RELKIND_PARTITIONED_TABLE)
                        {
                                partitioned_rels =
                                        get_partitioned_child_rels(root, rel->relid);
                                Assert(list_length(partitioned_rels) >= 1);
                        }
                        break;
                case RTE_SUBQUERY:
                        build_partitioned_rels = true;
                        break;
                default:
                        elog(ERROR, "unexpected rtekind: %d", (int) rte->rtekind);
        }

        /*
         * For every non-dummy child, remember the cheapest path.  Also, identify
         * all pathkeys (orderings) and parameterizations (required_outer sets)
         * available for the non-dummy member relations.
         */
        foreach(l, live_childrels)
        {
                RelOptInfo *childrel = lfirst(l);
                ListCell   *lcp;

                /*
                 * If we need to build partitioned_rels, accumulate the partitioned
                 * rels for this child.
                 */
                if (build_partitioned_rels)
                {
                        List       *cprels;

                        cprels = get_partitioned_child_rels(root, childrel->relid);
                        partitioned_rels = list_concat(partitioned_rels,
                                                                                   list_copy(cprels));
                }

                /*
                 * If child has an unparameterized cheapest-total path, add that to
                 * the unparameterized Append path we are constructing for the parent.
                 * If not, there's no workable unparameterized path.
                 */
                if (childrel->cheapest_total_path->param_info == NULL)
                        subpaths = accumulate_append_subpath(subpaths,
                                                                                                 childrel->cheapest_total_path);
                else
                        subpaths_valid = false;

                /* Same idea, but for a partial plan. */
                if (childrel->partial_pathlist != NIL)
                        partial_subpaths = accumulate_append_subpath(partial_subpaths,
                                                                                                                 linitial(childrel->partial_pathlist));
                else
                        partial_subpaths_valid = false;

                /*
                 * Collect lists of all the available path orderings and
                 * parameterizations for all the children.  We use these as a
                 * heuristic to indicate which sort orderings and parameterizations we
                 * should build Append and MergeAppend paths for.
                 */
                foreach(lcp, childrel->pathlist)
                {
                        Path       *childpath = (Path *) lfirst(lcp);
                        List       *childkeys = childpath->pathkeys;
                        Relids          childouter = PATH_REQ_OUTER(childpath);

                        /* Unsorted paths don't contribute to pathkey list */
                        if (childkeys != NIL)
                        {
                                ListCell   *lpk;
                                bool            found = false;

                                /* Have we already seen this ordering? */
                                foreach(lpk, all_child_pathkeys)
                                {
                                        List       *existing_pathkeys = (List *) lfirst(lpk);

                                        if (compare_pathkeys(existing_pathkeys,
                                                                                 childkeys) == PATHKEYS_EQUAL)
                                        {
                                                found = true;
                                                break;
                                        }
                                }
                                if (!found)
                                {
                                        /* No, so add it to all_child_pathkeys */
                                        all_child_pathkeys = lappend(all_child_pathkeys,
                                                                                                 childkeys);
                                }
                        }

                        /* Unparameterized paths don't contribute to param-set list */
                        if (childouter)
                        {
                                ListCell   *lco;
                                bool            found = false;

                                /* Have we already seen this param set? */
                                foreach(lco, all_child_outers)
                                {
                                        Relids          existing_outers = (Relids) lfirst(lco);

                                        if (bms_equal(existing_outers, childouter))
                                        {
                                                found = true;
                                                break;
                                        }
                                }
                                if (!found)
                                {
                                        /* No, so add it to all_child_outers */
                                        all_child_outers = lappend(all_child_outers,
                                                                                           childouter);
                                }
                        }
                }
        }

        /*
         * If we found unparameterized paths for all children, build an unordered,
         * unparameterized Append path for the rel.  (Note: this is correct even
         * if we have zero or one live subpath due to constraint exclusion.)
         */
        if (subpaths_valid)
                add_path(rel, (Path *) create_append_path(rel, subpaths, NULL, 0,
                                                                                                  partitioned_rels));

        /*
         * Consider an append of partial unordered, unparameterized partial paths.
         */
        if (partial_subpaths_valid && partial_subpaths != NIL)
        {
                AppendPath *appendpath;
                ListCell   *lc;
                int                     parallel_workers = 0;

                /*
                 * Decide on the number of workers to request for this append path.
                 * For now, we just use the maximum value from among the members.  It
                 * might be useful to use a higher number if the Append node were
                 * smart enough to spread out the workers, but it currently isn't.
                 */
                foreach(lc, partial_subpaths)
                {
                        Path       *path = lfirst(lc);

                        parallel_workers = Max(parallel_workers, path->parallel_workers);
                }
                Assert(parallel_workers > 0);

                /* Generate a partial append path. */
                appendpath = create_append_path(rel, partial_subpaths, NULL,
                                                                                parallel_workers, partitioned_rels);
                add_partial_path(rel, (Path *) appendpath);
        }

        /*
         * Also build unparameterized MergeAppend paths based on the collected
         * list of child pathkeys.
         */
        if (subpaths_valid)
                generate_mergeappend_paths(root, rel, live_childrels,
                                                                   all_child_pathkeys,
                                                                   partitioned_rels);

        /*
         * Build Append paths for each parameterization seen among the child rels.
         * (This may look pretty expensive, but in most cases of practical
         * interest, the child rels will expose mostly the same parameterizations,
         * so that not that many cases actually get considered here.)
         *
         * The Append node itself cannot enforce quals, so all qual checking must
         * be done in the child paths.  This means that to have a parameterized
         * Append path, we must have the exact same parameterization for each
         * child path; otherwise some children might be failing to check the
         * moved-down quals.  To make them match up, we can try to increase the
         * parameterization of lesser-parameterized paths.
         */
        foreach(l, all_child_outers)
        {
                Relids          required_outer = (Relids) lfirst(l);
                ListCell   *lcr;

                /* Select the child paths for an Append with this parameterization */
                subpaths = NIL;
                subpaths_valid = true;
                foreach(lcr, live_childrels)
                {
                        RelOptInfo *childrel = (RelOptInfo *) lfirst(lcr);
                        Path       *subpath;

                        subpath = get_cheapest_parameterized_child_path(root,
                                                                                                                        childrel,
                                                                                                                        required_outer);
                        if (subpath == NULL)
                        {
                                /* failed to make a suitable path for this child */
                                subpaths_valid = false;
                                break;
                        }
                        subpaths = accumulate_append_subpath(subpaths, subpath);
                }

                if (subpaths_valid)
                        add_path(rel, (Path *)
                                         create_append_path(rel, subpaths, required_outer, 0,
                                                                                partitioned_rels));
        }
}


/*
 * generate_mergeappend_paths
 *              Generate MergeAppend paths for an append relation
 *
 * Generate a path for each ordering (pathkey list) appearing in
 * all_child_pathkeys.
 *
 * We consider both cheapest-startup and cheapest-total cases, ie, for each
 * interesting ordering, collect all the cheapest startup subpaths and all the
 * cheapest total paths, and build a MergeAppend path for each case.
 *
 * We don't currently generate any parameterized MergeAppend paths.  While
 * it would not take much more code here to do so, it's very unclear that it
 * is worth the planning cycles to investigate such paths: there's little
 * use for an ordered path on the inside of a nestloop.  In fact, it's likely
 * that the current coding of add_path would reject such paths out of hand,
 * because add_path gives no credit for sort ordering of parameterized paths,
 * and a parameterized MergeAppend is going to be more expensive than the
 * corresponding parameterized Append path.  If we ever try harder to support
 * parameterized mergejoin plans, it might be worth adding support for
 * parameterized MergeAppends to feed such joins.  (See notes in
 * optimizer/README for why that might not ever happen, though.)
 */
static void
generate_mergeappend_paths(PlannerInfo *root, RelOptInfo *rel,
                                                   List *live_childrels,
                                                   List *all_child_pathkeys,
                                                   List *partitioned_rels)
{
        ListCell   *lcp;

        foreach(lcp, all_child_pathkeys)
        {
                List       *pathkeys = (List *) lfirst(lcp);
                List       *startup_subpaths = NIL;
                List       *total_subpaths = NIL;
                bool            startup_neq_total = false;
                ListCell   *lcr;

                /* Select the child paths for this ordering... */
                foreach(lcr, live_childrels)
                {
                        RelOptInfo *childrel = (RelOptInfo *) lfirst(lcr);
                        Path       *cheapest_startup,
                                           *cheapest_total;

                        /* Locate the right paths, if they are available. */
                        cheapest_startup =
                                get_cheapest_path_for_pathkeys(childrel->pathlist,
                                                                                           pathkeys,
                                                                                           NULL,
                                                                                           STARTUP_COST,
                                                                                           false);
                        cheapest_total =
                                get_cheapest_path_for_pathkeys(childrel->pathlist,
                                                                                           pathkeys,
                                                                                           NULL,
                                                                                           TOTAL_COST,
                                                                                           false);

                        /*
                         * If we can't find any paths with the right order just use the
                         * cheapest-total path; we'll have to sort it later.
                         */
                        if (cheapest_startup == NULL || cheapest_total == NULL)
                        {
                                cheapest_startup = cheapest_total =
                                        childrel->cheapest_total_path;
                                /* Assert we do have an unparameterized path for this child */
                                Assert(cheapest_total->param_info == NULL);
                        }

                        /*
                         * Notice whether we actually have different paths for the
                         * "cheapest" and "total" cases; frequently there will be no point
                         * in two create_merge_append_path() calls.
                         */
                        if (cheapest_startup != cheapest_total)
                                startup_neq_total = true;

                        startup_subpaths =
                                accumulate_append_subpath(startup_subpaths, cheapest_startup);
                        total_subpaths =
                                accumulate_append_subpath(total_subpaths, cheapest_total);
                }

                /* ... and build the MergeAppend paths */
                add_path(rel, (Path *) create_merge_append_path(root,
                                                                                                                rel,
                                                                                                                startup_subpaths,
                                                                                                                pathkeys,
                                                                                                                NULL,
                                                                                                                partitioned_rels));
                if (startup_neq_total)
                        add_path(rel, (Path *) create_merge_append_path(root,
                                                                                                                        rel,
                                                                                                                        total_subpaths,
                                                                                                                        pathkeys,
                                                                                                                        NULL,
                                                                                                                        partitioned_rels));
        }
}
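
/*
 * Note (not part of the copied PostgreSQL code): keeping both variants above
 * matters because, for example, the cheapest-startup MergeAppend tends to win
 * when only the first rows are needed (as under a LIMIT), while the
 * cheapest-total variant wins when the whole ordered result is consumed;
 * add_path() keeps whichever candidates remain competitive.
 */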


/*
 * get_cheapest_parameterized_child_path
 *              Get cheapest path for this relation that has exactly the requested
 *              parameterization.
 *
 * Returns NULL if unable to create such a path.
 */
static Path *
get_cheapest_parameterized_child_path(PlannerInfo *root, RelOptInfo *rel,
                                                                          Relids required_outer)
{
        Path       *cheapest;
        ListCell   *lc;

        /*
         * Look up the cheapest existing path with no more than the needed
         * parameterization.  If it has exactly the needed parameterization, we're
         * done.
         */
        cheapest = get_cheapest_path_for_pathkeys(rel->pathlist,
                                                                                          NIL,
                                                                                          required_outer,
                                                                                          TOTAL_COST,
                                                                                          false);
        Assert(cheapest != NULL);
        if (bms_equal(PATH_REQ_OUTER(cheapest), required_outer))
                return cheapest;

        /*
         * Otherwise, we can "reparameterize" an existing path to match the given
         * parameterization, which effectively means pushing down additional
         * joinquals to be checked within the path's scan.  However, some existing
         * paths might check the available joinquals already while others don't;
         * therefore, it's not clear which existing path will be cheapest after
         * reparameterization.  We have to go through them all and find out.
         */
        cheapest = NULL;
        foreach(lc, rel->pathlist)
        {
                Path       *path = (Path *) lfirst(lc);

                /* Can't use it if it needs more than requested parameterization */
                if (!bms_is_subset(PATH_REQ_OUTER(path), required_outer))
                        continue;

                /*
                 * Reparameterization can only increase the path's cost, so if it's
                 * already more expensive than the current cheapest, forget it.
                 */
                if (cheapest != NULL &&
                        compare_path_costs(cheapest, path, TOTAL_COST) <= 0)
                        continue;

                /* Reparameterize if needed, then recheck cost */
                if (!bms_equal(PATH_REQ_OUTER(path), required_outer))
                {
                        path = reparameterize_path(root, path, required_outer, 1.0);
                        if (path == NULL)
                                continue;               /* failed to reparameterize this one */
                        Assert(bms_equal(PATH_REQ_OUTER(path), required_outer));

                        if (cheapest != NULL &&
                                compare_path_costs(cheapest, path, TOTAL_COST) <= 0)
                                continue;
                }

                /* We have a new best path */
                cheapest = path;
        }

        /* Return the best path, or NULL if we found no suitable candidate */
        return cheapest;
}


/*
 * accumulate_append_subpath
 *              Add a subpath to the list being built for an Append or MergeAppend
 *
 * It's possible that the child is itself an Append or MergeAppend path, in
 * which case we can "cut out the middleman" and just add its child paths to
 * our own list.  (We don't try to do this earlier because we need to apply
 * both levels of transformation to the quals.)
 *
 * Note that if we omit a child MergeAppend in this way, we are effectively
 * omitting a sort step, which seems fine: if the parent is to be an Append,
 * its result would be unsorted anyway, while if the parent is to be a
 * MergeAppend, there's no point in a separate sort on a child.
 */
static List *
accumulate_append_subpath(List *subpaths, Path *path)
{
        if (IsA(path, AppendPath))
        {
                AppendPath *apath = (AppendPath *) path;

                /* list_copy is important here to avoid sharing list substructure */
                return list_concat(subpaths, list_copy(apath->subpaths));
        }
        else if (IsA(path, MergeAppendPath))
        {
                MergeAppendPath *mpath = (MergeAppendPath *) path;

                /* list_copy is important here to avoid sharing list substructure */
                return list_concat(subpaths, list_copy(mpath->subpaths));
        }
        else
                return lappend(subpaths, path);
}
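
/*
 * Example (not part of the copied PostgreSQL code): if the list being built
 * already holds {p0} and the incoming path is Append(p1, p2), the result is
 * {p0, p1, p2} rather than {p0, Append(p1, p2)}, so the parent Append or
 * MergeAppend sees a single flat list of child paths.
 */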


/*
 * standard_join_search
 *        Find possible joinpaths for a query by successively finding ways
 *        to join component relations into join relations.
 *
 * 'levels_needed' is the number of iterations needed, ie, the number of
 *              independent jointree items in the query.  This is > 1.
 *
 * 'initial_rels' is a list of RelOptInfo nodes for each independent
 *              jointree item.  These are the components to be joined together.
 *              Note that levels_needed == list_length(initial_rels).
 *
 * Returns the final level of join relations, i.e., the relation that is
 * the result of joining all the original relations together.
 * At least one implementation path must be provided for this relation and
 * all required sub-relations.
 *
 * To support loadable plugins that modify planner behavior by changing the
 * join searching algorithm, we provide a hook variable that lets a plugin
 * replace or supplement this function.  Any such hook must return the same
 * final join relation as the standard code would, but it might have a
 * different set of implementation paths attached, and only the sub-joinrels
 * needed for these paths need have been instantiated.
 *
 * Note to plugin authors: the functions invoked during standard_join_search()
 * modify root->join_rel_list and root->join_rel_hash.  If you want to do more
 * than one join-order search, you'll probably need to save and restore the
 * original states of those data structures.  See geqo_eval() for an example.
 */
RelOptInfo *
standard_join_search(PlannerInfo *root, int levels_needed, List *initial_rels)
{
        int                     lev;
        RelOptInfo *rel;

        /*
         * This function cannot be invoked recursively within any one planning
         * problem, so join_rel_level[] can't be in use already.
         */
        Assert(root->join_rel_level == NULL);

        /*
         * We employ a simple "dynamic programming" algorithm: we first find all
         * ways to build joins of two jointree items, then all ways to build joins
         * of three items (from two-item joins and single items), then four-item
         * joins, and so on until we have considered all ways to join all the
         * items into one rel.
         *
         * root->join_rel_level[j] is a list of all the j-item rels.  Initially we
         * set root->join_rel_level[1] to represent all the single-jointree-item
         * relations.
         */
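        /*
         * Worked example (not part of the copied PostgreSQL code): for a query
         * joining A, B and C with levels_needed = 3, join_rel_level[1] = {A, B, C};
         * the lev = 2 pass builds the two-item joinrels that pass the legality and
         * clause heuristics (e.g. {AB, AC, BC}), and the lev = 3 pass combines
         * those with the remaining single rels to produce the final {ABC} joinrel.
         */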
        root->join_rel_level = (List **) palloc0((levels_needed + 1) * sizeof(List *));

        root->join_rel_level[1] = initial_rels;

        for (lev = 2; lev <= levels_needed; lev++)
        {
                ListCell   *lc;

                /*
                 * Determine all possible pairs of relations to be joined at this
                 * level, and build paths for making each one from every available
                 * pair of lower-level relations.
                 */
                join_search_one_level(root, lev);

                /*
                 * Run generate_gather_paths() for each just-processed joinrel.  We
                 * could not do this earlier because both regular and partial paths
                 * can get added to a particular joinrel at multiple times within
                 * join_search_one_level.  After that, we're done creating paths for
                 * the joinrel, so run set_cheapest().
                 */
                foreach(lc, root->join_rel_level[lev])
                {
                        rel = (RelOptInfo *) lfirst(lc);

                        /* Create GatherPaths for any useful partial paths for rel */
                        generate_gather_paths(root, rel);

                        /* Find and save the cheapest paths for this rel */
                        set_cheapest(rel);

#ifdef OPTIMIZER_DEBUG
                        debug_print_rel(root, rel);
#endif
                }
        }

        /*
         * We should have a single rel at the final level.
         */
        if (root->join_rel_level[levels_needed] == NIL)
                elog(ERROR, "failed to build any %d-way joins", levels_needed);
        Assert(list_length(root->join_rel_level[levels_needed]) == 1);

        rel = (RelOptInfo *) linitial(root->join_rel_level[levels_needed]);

        root->join_rel_level = NULL;

        return rel;
}
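
/*
 * Illustration (not part of the copied PostgreSQL code): the hook mentioned
 * in the header comment above is join_search_hook.  A plugin can interpose on
 * the join search roughly as sketched here; my_join_search and
 * prev_join_search are hypothetical names, not the actual pg_hint_plan code.
 *
 *     static join_search_hook_type prev_join_search = NULL;
 *
 *     static RelOptInfo *
 *     my_join_search(PlannerInfo *root, int levels_needed, List *initial_rels)
 *     {
 *             ... inspect or adjust planner state here, then delegate ...
 *             if (prev_join_search)
 *                     return prev_join_search(root, levels_needed, initial_rels);
 *             return standard_join_search(root, levels_needed, initial_rels);
 *     }
 *
 *     void
 *     _PG_init(void)
 *     {
 *             prev_join_search = join_search_hook;
 *             join_search_hook = my_join_search;
 *     }
 */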


/*
 * join_search_one_level
 *        Consider ways to produce join relations containing exactly 'level'
 *        jointree items.  (This is one step of the dynamic-programming method
 *        embodied in standard_join_search.)  Join rel nodes for each feasible
 *        combination of lower-level rels are created and returned in a list.
 *        Implementation paths are created for each such joinrel, too.
 *
 * level: level of rels we want to make this time
 * root->join_rel_level[j], 1 <= j < level, is a list of rels containing j items
 *
 * The result is returned in root->join_rel_level[level].
 */
void
join_search_one_level(PlannerInfo *root, int level)
{
        List      **joinrels = root->join_rel_level;
        ListCell   *r;
        int                     k;

        Assert(joinrels[level] == NIL);

        /* Set join_cur_level so that new joinrels are added to proper list */
        root->join_cur_level = level;

        /*
         * First, consider left-sided and right-sided plans, in which rels of
         * exactly level-1 member relations are joined against initial relations.
         * We prefer to join using join clauses, but if we find a rel of level-1
         * members that has no join clauses, we will generate Cartesian-product
         * joins against all initial rels not already contained in it.
         */
        foreach(r, joinrels[level - 1])
        {
                RelOptInfo *old_rel = (RelOptInfo *) lfirst(r);

                if (old_rel->joininfo != NIL || old_rel->has_eclass_joins ||
                        has_join_restriction(root, old_rel))
                {
                        /*
                         * There are join clauses or join order restrictions relevant to
                         * this rel, so consider joins between this rel and (only) those
                         * initial rels it is linked to by a clause or restriction.
                         *
                         * At level 2 this condition is symmetric, so there is no need to
                         * look at initial rels before this one in the list; we already
                         * considered such joins when we were at the earlier rel.  (The
                         * mirror-image joins are handled automatically by make_join_rel.)
                         * In later passes (level > 2), we join rels of the previous level
                         * to each initial rel they don't already include but have a join
                         * clause or restriction with.
                         */
                        ListCell   *other_rels;

                        if (level == 2)         /* consider remaining initial rels */
                                other_rels = lnext(r);
                        else                            /* consider all initial rels */
                                other_rels = list_head(joinrels[1]);

                        make_rels_by_clause_joins(root,
                                                                          old_rel,
                                                                          other_rels);
                }
                else
                {
                        /*
                         * Oops, we have a relation that is not joined to any other
                         * relation, either directly or by join-order restrictions.
                         * Cartesian product time.
                         *
                         * We consider a cartesian product with each not-already-included
                         * initial rel, whether it has other join clauses or not.  At
                         * level 2, if there are two or more clauseless initial rels, we
                         * will redundantly consider joining them in both directions; but
                         * such cases aren't common enough to justify adding complexity to
                         * avoid the duplicated effort.
                         */
                        make_rels_by_clauseless_joins(root,
                                                                                  old_rel,
                                                                                  list_head(joinrels[1]));
                }
        }

        /*
         * Now, consider "bushy plans" in which relations of k initial rels are
         * joined to relations of level-k initial rels, for 2 <= k <= level-2.
         *
         * We only consider bushy-plan joins for pairs of rels where there is a
         * suitable join clause (or join order restriction), in order to avoid
         * unreasonable growth of planning time.
         */
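        /*
         * Example (not part of the copied PostgreSQL code): at level = 6 the loop
         * below pairs 2-item rels with 4-item rels (k = 2) and 3-item rels with
         * 3-item rels (k = 3), then stops, since make_join_rel() already covers
         * the mirror-image pairings.
         */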
831         for (k = 2;; k++)
832         {
833                 int                     other_level = level - k;
834
835                 /*
836                  * Since make_join_rel(x, y) handles both x,y and y,x cases, we only
837                  * need to go as far as the halfway point.
838                  */
839                 if (k > other_level)
840                         break;
841
842                 foreach(r, joinrels[k])
843                 {
844                         RelOptInfo *old_rel = (RelOptInfo *) lfirst(r);
845                         ListCell   *other_rels;
846                         ListCell   *r2;
847
848                         /*
849                          * We can ignore relations without join clauses here, unless they
850                          * participate in join-order restrictions --- then we might have
851                          * to force a bushy join plan.
852                          */
853                         if (old_rel->joininfo == NIL && !old_rel->has_eclass_joins &&
854                                 !has_join_restriction(root, old_rel))
855                                 continue;
856
857                         if (k == other_level)
858                                 other_rels = lnext(r);  /* only consider remaining rels */
859                         else
860                                 other_rels = list_head(joinrels[other_level]);
861
862                         for_each_cell(r2, other_rels)
863                         {
864                                 RelOptInfo *new_rel = (RelOptInfo *) lfirst(r2);
865
866                                 if (!bms_overlap(old_rel->relids, new_rel->relids))
867                                 {
868                                         /*
869                                          * OK, we can build a rel of the right level from this
870                                          * pair of rels.  Do so if there is at least one relevant
871                                          * join clause or join order restriction.
872                                          */
873                                         if (have_relevant_joinclause(root, old_rel, new_rel) ||
874                                                 have_join_order_restriction(root, old_rel, new_rel))
875                                         {
876                                                 (void) make_join_rel(root, old_rel, new_rel);
877                                         }
878                                 }
879                         }
880                 }
881         }
882
883         /*----------
884          * Last-ditch effort: if we failed to find any usable joins so far, force
885          * a set of cartesian-product joins to be generated.  This handles the
886          * special case where all the available rels have join clauses but we
887          * cannot use any of those clauses yet.  This can only happen when we are
888          * considering a join sub-problem (a sub-joinlist) and all the rels in the
889          * sub-problem have only join clauses with rels outside the sub-problem.
890          * An example is
891          *
892          *              SELECT ... FROM a INNER JOIN b ON TRUE, c, d, ...
893          *              WHERE a.w = c.x and b.y = d.z;
894          *
895          * If the "a INNER JOIN b" sub-problem does not get flattened into the
896          * upper level, we must be willing to make a cartesian join of a and b;
897          * but the code above will not have done so, because it thought that both
898          * a and b have joinclauses.  We consider only left-sided and right-sided
899          * cartesian joins in this case (no bushy).
900          *----------
901          */
902         if (joinrels[level] == NIL)
903         {
904                 /*
905                  * This loop is just like the first one, except we always call
906                  * make_rels_by_clauseless_joins().
907                  */
908                 foreach(r, joinrels[level - 1])
909                 {
910                         RelOptInfo *old_rel = (RelOptInfo *) lfirst(r);
911
912                         make_rels_by_clauseless_joins(root,
913                                                                                   old_rel,
914                                                                                   list_head(joinrels[1]));
915                 }
916
917                 /*----------
918                  * When special joins are involved, there may be no legal way
919                  * to make an N-way join for some values of N.  For example consider
920                  *
921                  * SELECT ... FROM t1 WHERE
922                  *       x IN (SELECT ... FROM t2,t3 WHERE ...) AND
923                  *       y IN (SELECT ... FROM t4,t5 WHERE ...)
924                  *
925                  * We will flatten this query to a 5-way join problem, but there are
926                  * no 4-way joins that join_is_legal() will consider legal.  We have
927                  * to accept failure at level 4 and go on to discover a workable
928                  * bushy plan at level 5.
929                  *
930                  * However, if there are no special joins and no lateral references
931                  * then join_is_legal() should never fail, and so the following sanity
932                  * check is useful.
933                  *----------
934                  */
935                 if (joinrels[level] == NIL &&
936                         root->join_info_list == NIL &&
937                         !root->hasLateralRTEs)
938                         elog(ERROR, "failed to build any %d-way joins", level);
939         }
940 }
941
942
943 /*
944  * make_rels_by_clause_joins
945  *        Build joins between the given relation 'old_rel' and other relations
946  *        that participate in join clauses that 'old_rel' also participates in
947  *        (or participate in join-order restrictions with it).
948  *        The join rels are returned in root->join_rel_level[join_cur_level].
949  *
950  * Note: at levels above 2 we will generate the same joined relation in
951  * multiple ways --- for example (a join b) join c is the same RelOptInfo as
952  * (b join c) join a, though the second case will add a different set of Paths
953  * to it.  This is the reason for using the join_rel_level mechanism, which
954  * automatically ensures that each new joinrel is only added to the list once.
955  *
956  * 'old_rel' is the relation entry for the relation to be joined
957  * 'other_rels': the first cell in a linked list containing the other
958  * rels to be considered for joining
959  *
960  * Currently, this is only used with initial rels in other_rels, but it
961  * will work for joining to joinrels too.
962  */
963 static void
964 make_rels_by_clause_joins(PlannerInfo *root,
965                                                   RelOptInfo *old_rel,
966                                                   ListCell *other_rels)
967 {
968         ListCell   *l;
969
970         for_each_cell(l, other_rels)
971         {
972                 RelOptInfo *other_rel = (RelOptInfo *) lfirst(l);
973
974                 if (!bms_overlap(old_rel->relids, other_rel->relids) &&
975                         (have_relevant_joinclause(root, old_rel, other_rel) ||
976                          have_join_order_restriction(root, old_rel, other_rel)))
977                 {
978                         (void) make_join_rel(root, old_rel, other_rel);
979                 }
980         }
981 }
982
983
984 /*
985  * make_rels_by_clauseless_joins
986  *        Given a relation 'old_rel' and a list of other relations
987  *        'other_rels', create a join relation between 'old_rel' and each
988  *        member of 'other_rels' that isn't already included in 'old_rel'.
989  *        The join rels are returned in root->join_rel_level[join_cur_level].
990  *
991  * 'old_rel' is the relation entry for the relation to be joined
992  * 'other_rels': the first cell of a linked list containing the
993  * other rels to be considered for joining
994  *
995  * Currently, this is only used with initial rels in other_rels, but it would
996  * work for joining to joinrels too.
997  */
998 static void
999 make_rels_by_clauseless_joins(PlannerInfo *root,
1000                                                           RelOptInfo *old_rel,
1001                                                           ListCell *other_rels)
1002 {
1003         ListCell   *l;
1004
1005         for_each_cell(l, other_rels)
1006         {
1007                 RelOptInfo *other_rel = (RelOptInfo *) lfirst(l);
1008
1009                 if (!bms_overlap(other_rel->relids, old_rel->relids))
1010                 {
1011                         (void) make_join_rel(root, old_rel, other_rel);
1012                 }
1013         }
1014 }
1015
1016
1017 /*
1018  * join_is_legal
1019  *         Determine whether a proposed join is legal given the query's
1020  *         join order constraints; and if it is, determine the join type.
1021  *
1022  * Caller must supply not only the two rels, but the union of their relids.
1023  * (We could simplify the API by computing joinrelids locally, but this
1024  * would be redundant work in the normal path through make_join_rel.)
1025  *
1026  * On success, *sjinfo_p is set to NULL if this is to be a plain inner join,
1027  * else it's set to point to the associated SpecialJoinInfo node.  Also,
1028  * *reversed_p is set TRUE if the given relations need to be swapped to
1029  * match the SpecialJoinInfo node.
1030  */
1031 static bool
1032 join_is_legal(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2,
1033                           Relids joinrelids,
1034                           SpecialJoinInfo **sjinfo_p, bool *reversed_p)
1035 {
1036         SpecialJoinInfo *match_sjinfo;
1037         bool            reversed;
1038         bool            unique_ified;
1039         bool            must_be_leftjoin;
1040         ListCell   *l;
1041
1042         /*
1043          * Ensure output params are set on failure return.  This is just to
1044          * suppress uninitialized-variable warnings from overly anal compilers.
1045          */
1046         *sjinfo_p = NULL;
1047         *reversed_p = false;
1048
1049         /*
1050          * If we have any special joins, the proposed join might be illegal; and
1051          * in any case we have to determine its join type.  Scan the join info
1052          * list for matches and conflicts.
1053          */
1054         match_sjinfo = NULL;
1055         reversed = false;
1056         unique_ified = false;
1057         must_be_leftjoin = false;
1058
1059         foreach(l, root->join_info_list)
1060         {
1061                 SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) lfirst(l);
1062
1063                 /*
1064                  * This special join is not relevant unless its RHS overlaps the
1065                  * proposed join.  (Check this first as a fast path for dismissing
1066                  * most irrelevant SJs quickly.)
1067                  */
1068                 if (!bms_overlap(sjinfo->min_righthand, joinrelids))
1069                         continue;
1070
1071                 /*
1072                  * Also, not relevant if proposed join is fully contained within RHS
1073                  * (ie, we're still building up the RHS).
1074                  */
1075                 if (bms_is_subset(joinrelids, sjinfo->min_righthand))
1076                         continue;
1077
1078                 /*
1079                  * Also, not relevant if SJ is already done within either input.
1080                  */
1081                 if (bms_is_subset(sjinfo->min_lefthand, rel1->relids) &&
1082                         bms_is_subset(sjinfo->min_righthand, rel1->relids))
1083                         continue;
1084                 if (bms_is_subset(sjinfo->min_lefthand, rel2->relids) &&
1085                         bms_is_subset(sjinfo->min_righthand, rel2->relids))
1086                         continue;
1087
1088                 /*
1089                  * If it's a semijoin and we already joined the RHS to any other rels
1090                  * within either input, then we must have unique-ified the RHS at that
1091                  * point (see below).  Therefore the semijoin is no longer relevant in
1092                  * this join path.
1093                  */
1094                 if (sjinfo->jointype == JOIN_SEMI)
1095                 {
1096                         if (bms_is_subset(sjinfo->syn_righthand, rel1->relids) &&
1097                                 !bms_equal(sjinfo->syn_righthand, rel1->relids))
1098                                 continue;
1099                         if (bms_is_subset(sjinfo->syn_righthand, rel2->relids) &&
1100                                 !bms_equal(sjinfo->syn_righthand, rel2->relids))
1101                                 continue;
1102                 }
1103
1104                 /*
1105                  * If one input contains min_lefthand and the other contains
1106                  * min_righthand, then we can perform the SJ at this join.
1107                  *
1108                  * Reject if we get matches to more than one SJ; that implies we're
1109                  * considering something that's not really valid.
1110                  */
1111                 if (bms_is_subset(sjinfo->min_lefthand, rel1->relids) &&
1112                         bms_is_subset(sjinfo->min_righthand, rel2->relids))
1113                 {
1114                         if (match_sjinfo)
1115                                 return false;   /* invalid join path */
1116                         match_sjinfo = sjinfo;
1117                         reversed = false;
1118                 }
1119                 else if (bms_is_subset(sjinfo->min_lefthand, rel2->relids) &&
1120                                  bms_is_subset(sjinfo->min_righthand, rel1->relids))
1121                 {
1122                         if (match_sjinfo)
1123                                 return false;   /* invalid join path */
1124                         match_sjinfo = sjinfo;
1125                         reversed = true;
1126                 }
1127                 else if (sjinfo->jointype == JOIN_SEMI &&
1128                                  bms_equal(sjinfo->syn_righthand, rel2->relids) &&
1129                                  create_unique_path(root, rel2, rel2->cheapest_total_path,
1130                                                                         sjinfo) != NULL)
1131                 {
1132                         /*----------
1133                          * For a semijoin, we can join the RHS to anything else by
1134                          * unique-ifying the RHS (if the RHS can be unique-ified).
1135                          * We will only get here if we have the full RHS but less
1136                          * than min_lefthand on the LHS.
1137                          *
1138                          * The reason to consider such a join path is exemplified by
1139                          *      SELECT ... FROM a,b WHERE (a.x,b.y) IN (SELECT c1,c2 FROM c)
1140                          * If we insist on doing this as a semijoin we will first have
1141                          * to form the cartesian product of A*B.  But if we unique-ify
1142                          * C then the semijoin becomes a plain innerjoin and we can join
1143                          * in any order, eg C to A and then to B.  When C is much smaller
1144                          * than A and B this can be a huge win.  So we allow C to be
1145                          * joined to just A or just B here, and then make_join_rel has
1146                          * to handle the case properly.
1147                          *
1148                          * Note that actually we'll allow unique-ified C to be joined to
1149                          * some other relation D here, too.  That is legal, if usually not
1150                          * very sane, and this routine is only concerned with legality not
1151                          * with whether the join is good strategy.
1152                          *----------
1153                          */
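                        /*
                         * Note that we only reach this branch when
                         * create_unique_path() succeeded above; it returns
                         * NULL when the RHS cannot be unique-ified (roughly,
                         * when the semijoin clauses are not all equality
                         * operators with sortable or hashable operand types),
                         * and in that case we fall through to the checks
                         * below.
                         */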
1154                         if (match_sjinfo)
1155                                 return false;   /* invalid join path */
1156                         match_sjinfo = sjinfo;
1157                         reversed = false;
1158                         unique_ified = true;
1159                 }
1160                 else if (sjinfo->jointype == JOIN_SEMI &&
1161                                  bms_equal(sjinfo->syn_righthand, rel1->relids) &&
1162                                  create_unique_path(root, rel1, rel1->cheapest_total_path,
1163                                                                         sjinfo) != NULL)
1164                 {
1165                         /* Reversed semijoin case */
1166                         if (match_sjinfo)
1167                                 return false;   /* invalid join path */
1168                         match_sjinfo = sjinfo;
1169                         reversed = true;
1170                         unique_ified = true;
1171                 }
1172                 else
1173                 {
1174                         /*
1175                          * Otherwise, the proposed join overlaps the RHS but isn't a valid
1176                          * implementation of this SJ.  But don't panic quite yet: the RHS
1177                          * violation might have occurred previously, in one or both input
1178                          * relations, in which case we must have previously decided that
1179                          * it was OK to commute some other SJ with this one.  If we need
1180                          * to perform this join to finish building up the RHS, rejecting
1181                          * it could lead to not finding any plan at all.  (This can occur
1182                          * because of the heuristics elsewhere in this file that postpone
1183                          * clauseless joins: we might not consider doing a clauseless join
1184                          * within the RHS until after we've performed other, validly
1185                          * commutable SJs with one or both sides of the clauseless join.)
1186                          * This consideration boils down to the rule that if both inputs
1187                          * overlap the RHS, we can allow the join --- they are either
1188                          * fully within the RHS, or represent previously-allowed joins to
1189                          * rels outside it.
1190                          */
1191                         if (bms_overlap(rel1->relids, sjinfo->min_righthand) &&
1192                                 bms_overlap(rel2->relids, sjinfo->min_righthand))
1193                                 continue;               /* assume valid previous violation of RHS */
1194
1195                         /*
1196                          * The proposed join could still be legal, but only if we're
1197                          * allowed to associate it into the RHS of this SJ.  That means
1198                          * this SJ must be a LEFT join (not SEMI or ANTI, and certainly
1199                          * not FULL) and the proposed join must not overlap the LHS.
1200                          */
1201                         if (sjinfo->jointype != JOIN_LEFT ||
1202                                 bms_overlap(joinrelids, sjinfo->min_lefthand))
1203                                 return false;   /* invalid join path */
1204
1205                         /*
1206                          * To be valid, the proposed join must be a LEFT join; otherwise
1207                          * it can't associate into this SJ's RHS.  But we may not yet have
1208                          * found the SpecialJoinInfo matching the proposed join, so we
1209                          * can't test that yet.  Remember the requirement for later.
1210                          */
1211                         must_be_leftjoin = true;
1212                 }
1213         }
1214
1215         /*
1216          * Fail if we violated any SJ's RHS and didn't match to a LEFT SJ: the
1217          * proposed join can't associate into an SJ's RHS.
1218          *
1219          * Also, fail if the proposed join's predicate isn't strict; we're
1220          * essentially checking to see if we can apply outer-join identity 3, and
1221          * that's a requirement.  (This check may be redundant with checks in
1222          * make_outerjoininfo, but I'm not quite sure, and it's cheap to test.)
1223          */
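        /*
         * For reference, outer-join identity 3 (see
         * src/backend/optimizer/README) is
         *      (A leftjoin B on (Pab)) leftjoin C on (Pbc)
         *          = A leftjoin (B leftjoin C on (Pbc)) on (Pab)
         * and it holds only when Pbc is strict for (at least one column of)
         * B, i.e. it must fail for all-null B rows; that strictness
         * requirement is what lhs_strict records for the matched SJ.
         */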
1224         if (must_be_leftjoin &&
1225                 (match_sjinfo == NULL ||
1226                  match_sjinfo->jointype != JOIN_LEFT ||
1227                  !match_sjinfo->lhs_strict))
1228                 return false;                   /* invalid join path */
1229
1230         /*
1231          * We also have to check for constraints imposed by LATERAL references.
1232          */
1233         if (root->hasLateralRTEs)
1234         {
1235                 bool            lateral_fwd;
1236                 bool            lateral_rev;
1237                 Relids          join_lateral_rels;
1238
1239                 /*
1240                  * The proposed rels could each contain lateral references to the
1241                  * other, in which case the join is impossible.  If there are lateral
1242                  * references in just one direction, then the join has to be done with
1243                  * a nestloop with the lateral referencer on the inside.  If the join
1244                  * matches an SJ that cannot be implemented by such a nestloop, the
1245                  * join is impossible.
1246                  *
1247                  * Also, if the lateral reference is only indirect, we should reject
1248                  * the join; whatever rel(s) the reference chain goes through must be
1249                  * joined to first.
1250                  *
1251                  * Another case that might keep us from building a valid plan is the
1252                  * implementation restriction described by have_dangerous_phv().
1253                  */
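                /*
                 * For illustration (a minimal example): given
                 *      SELECT ... FROM a, LATERAL (SELECT * FROM b WHERE b.x = a.x) ss
                 * the subquery rel ss has lateral_relids = {a}, so joining
                 * {a} to {ss} is only possible as a nestloop with a on the
                 * outer side; if each side laterally referenced the other,
                 * the join would be impossible altogether.
                 */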
1254                 lateral_fwd = bms_overlap(rel1->relids, rel2->lateral_relids);
1255                 lateral_rev = bms_overlap(rel2->relids, rel1->lateral_relids);
1256                 if (lateral_fwd && lateral_rev)
1257                         return false;           /* have lateral refs in both directions */
1258                 if (lateral_fwd)
1259                 {
1260                         /* has to be implemented as nestloop with rel1 on left */
1261                         if (match_sjinfo &&
1262                                 (reversed ||
1263                                  unique_ified ||
1264                                  match_sjinfo->jointype == JOIN_FULL))
1265                                 return false;   /* not implementable as nestloop */
1266                         /* check there is a direct reference from rel2 to rel1 */
1267                         if (!bms_overlap(rel1->relids, rel2->direct_lateral_relids))
1268                                 return false;   /* only indirect refs, so reject */
1269                         /* check we won't have a dangerous PHV */
1270                         if (have_dangerous_phv(root, rel1->relids, rel2->lateral_relids))
1271                                 return false;   /* might be unable to handle required PHV */
1272                 }
1273                 else if (lateral_rev)
1274                 {
1275                         /* has to be implemented as nestloop with rel2 on left */
1276                         if (match_sjinfo &&
1277                                 (!reversed ||
1278                                  unique_ified ||
1279                                  match_sjinfo->jointype == JOIN_FULL))
1280                                 return false;   /* not implementable as nestloop */
1281                         /* check there is a direct reference from rel1 to rel2 */
1282                         if (!bms_overlap(rel2->relids, rel1->direct_lateral_relids))
1283                                 return false;   /* only indirect refs, so reject */
1284                         /* check we won't have a dangerous PHV */
1285                         if (have_dangerous_phv(root, rel2->relids, rel1->lateral_relids))
1286                                 return false;   /* might be unable to handle required PHV */
1287                 }
1288
1289                 /*
1290                  * LATERAL references could also cause problems later on if we accept
1291                  * this join: if the join's minimum parameterization includes any rels
1292                  * that would have to be on the inside of an outer join with this join
1293                  * rel, then it's never going to be possible to build the complete
1294                  * query using this join.  We should reject this join not only because
1295                  * it'll save work, but because if we don't, the clauseless-join
1296                  * heuristics might think that legality of this join means that some
1297                  * other join rel need not be formed, and that could lead to failure
1298                  * to find any plan at all.  We have to consider not only rels that
1299                  * are directly on the inner side of an OJ with the joinrel, but also
1300                  * ones that are indirectly so, so search to find all such rels.
1301                  */
1302                 join_lateral_rels = min_join_parameterization(root, joinrelids,
1303                                                                                                           rel1, rel2);
1304                 if (join_lateral_rels)
1305                 {
1306                         Relids          join_plus_rhs = bms_copy(joinrelids);
1307                         bool            more;
1308
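                        /*
                         * The do-loop below is effectively a transitive-closure
                         * computation: starting from the proposed joinrel's
                         * relids, it keeps adding the min_righthand of every
                         * non-FULL SJ whose LHS overlaps the set collected so
                         * far, until nothing more can be added.
                         */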
1309                         do
1310                         {
1311                                 more = false;
1312                                 foreach(l, root->join_info_list)
1313                                 {
1314                                         SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) lfirst(l);
1315
1316                                         /* ignore full joins --- their ordering is predetermined */
1317                                         if (sjinfo->jointype == JOIN_FULL)
1318                                                 continue;
1319
1320                                         if (bms_overlap(sjinfo->min_lefthand, join_plus_rhs) &&
1321                                                 !bms_is_subset(sjinfo->min_righthand, join_plus_rhs))
1322                                         {
1323                                                 join_plus_rhs = bms_add_members(join_plus_rhs,
1324                                                                                                                 sjinfo->min_righthand);
1325                                                 more = true;
1326                                         }
1327                                 }
1328                         } while (more);
1329                         if (bms_overlap(join_plus_rhs, join_lateral_rels))
1330                                 return false;   /* will not be able to join to some RHS rel */
1331                 }
1332         }
1333
1334         /* Otherwise, it's a valid join */
1335         *sjinfo_p = match_sjinfo;
1336         *reversed_p = reversed;
1337         return true;
1338 }
1339
1340
1341 /*
1342  * has_join_restriction
1343  *              Detect whether the specified relation has join-order restrictions,
1344  *              due to being inside an outer join or an IN (sub-SELECT),
1345  *              or participating in any LATERAL references or multi-rel PHVs.
1346  *
1347  * Essentially, this tests whether have_join_order_restriction() could
1348  * succeed with this rel and some other one.  It's OK if we sometimes
1349  * say "true" incorrectly.  (Therefore, we don't bother with the relatively
1350  * expensive has_legal_joinclause test.)
1351  */
1352 static bool
1353 has_join_restriction(PlannerInfo *root, RelOptInfo *rel)
1354 {
1355         ListCell   *l;
1356
1357         if (rel->lateral_relids != NULL || rel->lateral_referencers != NULL)
1358                 return true;
1359
1360         foreach(l, root->placeholder_list)
1361         {
1362                 PlaceHolderInfo *phinfo = (PlaceHolderInfo *) lfirst(l);
1363
1364                 if (bms_is_subset(rel->relids, phinfo->ph_eval_at) &&
1365                         !bms_equal(rel->relids, phinfo->ph_eval_at))
1366                         return true;
1367         }
1368
1369         foreach(l, root->join_info_list)
1370         {
1371                 SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) lfirst(l);
1372
1373                 /* ignore full joins --- other mechanisms preserve their ordering */
1374                 if (sjinfo->jointype == JOIN_FULL)
1375                         continue;
1376
1377                 /* ignore if SJ is already contained in rel */
1378                 if (bms_is_subset(sjinfo->min_lefthand, rel->relids) &&
1379                         bms_is_subset(sjinfo->min_righthand, rel->relids))
1380                         continue;
1381
1382                 /* restricted if it overlaps LHS or RHS, but doesn't contain SJ */
1383                 if (bms_overlap(sjinfo->min_lefthand, rel->relids) ||
1384                         bms_overlap(sjinfo->min_righthand, rel->relids))
1385                         return true;
1386         }
1387
1388         return false;
1389 }
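
/*
 * For illustration (a minimal example): in
 *      SELECT ... FROM a LEFT JOIN b ON a.x = b.x, c
 * the base rels a and b each overlap the outer join's min_lefthand or
 * min_righthand without containing the whole SJ, so has_join_restriction()
 * returns true for them, whereas it returns false for c (assuming c has no
 * lateral references and no multi-rel PlaceHolderVars of its own).
 */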
1390
1391
1392 /*
1393  * mark_dummy_rel --- mark a relation as proven empty.
1394  *
1395  * During GEQO planning, this can get invoked more than once on the same
1396  * baserel struct, so it's worth checking to see if the rel is already marked
1397  * dummy.
1398  *
1399  * Also, when called during GEQO join planning, we are in a short-lived
1400  * memory context.  We must make sure that the dummy path attached to a
1401  * baserel survives the GEQO cycle, else the baserel is trashed for future
1402  * GEQO cycles.  On the other hand, when we are marking a joinrel during GEQO,
1403  * we don't want the dummy path to clutter the main planning context.  Upshot
1404  * is that the best solution is to explicitly make the dummy path in the same
1405  * context the given RelOptInfo is in.
1406  */
1407 static void
1408 mark_dummy_rel(RelOptInfo *rel)
1409 {
1410         MemoryContext oldcontext;
1411
1412         /* Already marked? */
1413         if (is_dummy_rel(rel))
1414                 return;
1415
1416         /* No, so choose correct context to make the dummy path in */
1417         oldcontext = MemoryContextSwitchTo(GetMemoryChunkContext(rel));
1418
1419         /* Set dummy size estimate */
1420         rel->rows = 0;
1421
1422         /* Evict any previously chosen paths */
1423         rel->pathlist = NIL;
1424         rel->partial_pathlist = NIL;
1425
1426         /* Set up the dummy path */
1427         add_path(rel, (Path *) create_append_path(rel, NIL,
1428                                                                                           rel->lateral_relids,
1429                                                                                           0, NIL));
1430
1431         /* Set or update cheapest_total_path and related fields */
1432         set_cheapest(rel);
1433
1434         MemoryContextSwitchTo(oldcontext);
1435 }
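
/*
 * For context: the join-search code in joinrels.c (make_join_rel() and its
 * helpers) calls this when a proposed join is provably empty, for example
 * when one input rel is already dummy or when restriction_is_constant_false()
 * below finds a constant-FALSE restriction.  The childless AppendPath built
 * here is the planner's standard representation of a dummy (empty) relation.
 */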
1436
1437
1438 /*
1439  * restriction_is_constant_false --- is a restrictlist just false?
1440  *
1441  * In cases where a qual is provably constant false, eval_const_expressions
1442  * will generally have thrown away anything that's ANDed with it.  In outer
1443  * join situations this will leave us computing cartesian products only to
1444  * decide there's no match for an outer row, which is pretty stupid.  So,
1445  * we need to detect the case.
1446  *
1447  * If only_pushed_down is true, then consider only quals that are pushed-down
1448  * from the point of view of the joinrel.
1449  */
1450 static bool
1451 restriction_is_constant_false(List *restrictlist,
1452                                                           RelOptInfo *joinrel,
1453                                                           bool only_pushed_down)
1454 {
1455         ListCell   *lc;
1456
1457         /*
1458          * Despite the above comment, the restriction list we see here might
1459          * possibly have other members besides the FALSE constant, since other
1460          * quals could get "pushed down" to the outer join level.  So we check
1461          * each member of the list.
1462          */
1463         foreach(lc, restrictlist)
1464         {
1465                 RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc);
1466
1467                 if (only_pushed_down && !RINFO_IS_PUSHED_DOWN(rinfo, joinrel->relids))
1468                         continue;
1469
1470                 if (rinfo->clause && IsA(rinfo->clause, Const))
1471                 {
1472                         Const      *con = (Const *) rinfo->clause;
1473
1474                         /* constant NULL is as good as constant FALSE for our purposes */
1475                         if (con->constisnull)
1476                                 return true;
1477                         if (!DatumGetBool(con->constvalue))
1478                                 return true;
1479                 }
1480         }
1481         return false;
1482 }
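
/*
 * For illustration (a minimal example): in
 *      SELECT ... FROM a LEFT JOIN b ON a.x = b.x AND false
 * eval_const_expressions() folds the join clause down to a single constant
 * FALSE.  The join-search code in joinrels.c uses this test to notice that
 * no b row can ever match, and marks the affected rel dummy (see
 * mark_dummy_rel() above) instead of building join paths that cannot produce
 * any matches.
 */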