1 /*-------------------------------------------------------------------------
2  *
3  * core.c
4  *        Routines copied from PostgreSQL core distribution.
5  *
6  *
7  * The main purpose of this file is to provide access to static functions
8  * in core.  Another purpose is to tweak the behavior of some functions by
9  * replacing parts of them with macro definitions; see the end of
10  * pg_hint_plan.c for details.  The functions *must* be copied here unchanged.
11  *
13  * This file contains the following functions, copied from the corresponding files.
13  *
14  * src/backend/optimizer/path/allpaths.c
15  *
16  *      static functions:
17  *     set_plain_rel_pathlist()
18  *     create_plain_partial_paths()
19  *     set_append_rel_pathlist()
20  *     generate_mergeappend_paths()
21  *     get_cheapest_parameterized_child_path()
22  *     accumulate_append_subpath()
23  *
24  *  public functions:
25  *     standard_join_search(): This function is not static.  The reason for
26  *        including it here is make_rels_by_clause_joins: in order to avoid
27  *        generating apparently unwanted join combinations, we decided to
28  *        change the behavior of make_join_rel, which is called beneath this
29  *        function.
30  *
31  * src/backend/optimizer/path/joinrels.c
32  *
33  *      public functions:
34  *     join_search_one_level(): We have to modify this to call our own
35  *                  definition of make_rels_by_clause_joins.
36  *
37  *      static functions:
38  *     make_rels_by_clause_joins()
39  *     make_rels_by_clauseless_joins()
40  *     join_is_legal()
41  *     has_join_restriction()
42  *     is_dummy_rel()
43  *     mark_dummy_rel()
44  *     restriction_is_constant_false()
45  *
46  *
47  * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
48  * Portions Copyright (c) 1994, Regents of the University of California
49  *
50  *-------------------------------------------------------------------------
51  */
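
/*
 * Illustrative sketch (not part of the copied sources): the macro-based
 * tweaking mentioned above generally amounts to redefining a callee before
 * this file is #included, so that the unchanged copies below call into
 * pg_hint_plan's replacement instead of the original routine.  The wrapper
 * name here is hypothetical; the actual macro definitions live at the end of
 * pg_hint_plan.c.
 *
 *     #define make_join_rel(root, rel1, rel2) \
 *             pg_hint_plan_make_join_rel((root), (rel1), (rel2))
 *     #include "core.c"
 */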
52
53
54 /*
55  * set_plain_rel_pathlist
56  *        Build access paths for a plain relation (no subquery, no inheritance)
57  */
58 static void
59 set_plain_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
60 {
61         Relids          required_outer;
62
63         /*
64          * We don't support pushing join clauses into the quals of a seqscan, but
65          * it could still have required parameterization due to LATERAL refs in
66          * its tlist.
67          */
68         required_outer = rel->lateral_relids;
69
70         /* Consider sequential scan */
71         add_path(rel, create_seqscan_path(root, rel, required_outer, 0));
72
73         /* If appropriate, consider parallel sequential scan */
74         if (rel->consider_parallel && required_outer == NULL)
75                 create_plain_partial_paths(root, rel);
76
77         /* Consider index scans */
78         create_index_paths(root, rel);
79
80         /* Consider TID scans */
81         create_tidscan_paths(root, rel);
82 }
83
84 /*
85  * create_plain_partial_paths
86  *        Build partial access paths for parallel scan of a plain relation
87  */
88 static void
89 create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel)
90 {
91         int                     parallel_workers;
92
93         /*
94          * If the user has set the parallel_workers reloption, use that; otherwise
95          * select a default number of workers.
96          */
97         if (rel->rel_parallel_workers != -1)
98                 parallel_workers = rel->rel_parallel_workers;
99         else
100         {
101                 int                     parallel_threshold;
102
103                 /*
104                  * If this relation is too small to be worth a parallel scan, just
105                  * return without doing anything ... unless it's an inheritance child.
106                  * In that case, we want to generate a parallel path here anyway.  It
107                  * might not be worthwhile just for this relation, but when combined
108                  * with all of its inheritance siblings it may well pay off.
109                  */
110                 if (rel->pages < (BlockNumber) min_parallel_relation_size &&
111                         rel->reloptkind == RELOPT_BASEREL)
112                         return;
113
114                 /*
115                  * Select the number of workers based on the log of the size of the
116                  * relation.  This probably needs to be a good deal more
117                  * sophisticated, but we need something here for now.  Note that the
118                  * upper limit of the min_parallel_relation_size GUC is chosen to
119                  * prevent overflow here.
120                  */
121                 parallel_workers = 1;
122                 parallel_threshold = Max(min_parallel_relation_size, 1);
123                 while (rel->pages >= (BlockNumber) (parallel_threshold * 3))
124                 {
125                         parallel_workers++;
126                         parallel_threshold *= 3;
127                         if (parallel_threshold > INT_MAX / 3)
128                                 break;                  /* avoid overflow */
129                 }
130         }
131
132         /*
133          * In no case use more than max_parallel_workers_per_gather workers.
134          */
135         parallel_workers = Min(parallel_workers, max_parallel_workers_per_gather);
136
137         /* If any limit was set to zero, the user doesn't want a parallel scan. */
138         if (parallel_workers <= 0)
139                 return;
140
141         /* Add an unordered partial path based on a parallel sequential scan. */
142         add_partial_path(rel, create_seqscan_path(root, rel, NULL, parallel_workers));
143 }
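
/*
 * Worked example for the worker-count heuristic above (numbers assumed for
 * illustration only): with min_parallel_relation_size = 1024 pages and a
 * 9216-page relation, the loop runs twice (thresholds 1024 -> 3072 -> 9216),
 * yielding parallel_workers = 3 before the clamp to
 * max_parallel_workers_per_gather.
 */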
144
145 /*
146  * set_append_rel_pathlist
147  *        Build access paths for an "append relation"
148  */
149 static void
150 set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
151                                                 Index rti, RangeTblEntry *rte)
152 {
153         int                     parentRTindex = rti;
154         List       *live_childrels = NIL;
155         List       *subpaths = NIL;
156         bool            subpaths_valid = true;
157         List       *partial_subpaths = NIL;
158         bool            partial_subpaths_valid = true;
159         List       *all_child_pathkeys = NIL;
160         List       *all_child_outers = NIL;
161         ListCell   *l;
162
163         /*
164          * Generate access paths for each member relation, and remember the
165          * cheapest path for each one.  Also, identify all pathkeys (orderings)
166          * and parameterizations (required_outer sets) available for the member
167          * relations.
168          */
169         foreach(l, root->append_rel_list)
170         {
171                 AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(l);
172                 int                     childRTindex;
173                 RangeTblEntry *childRTE;
174                 RelOptInfo *childrel;
175                 ListCell   *lcp;
176
177                 /* append_rel_list contains all append rels; ignore others */
178                 if (appinfo->parent_relid != parentRTindex)
179                         continue;
180
181                 /* Re-locate the child RTE and RelOptInfo */
182                 childRTindex = appinfo->child_relid;
183                 childRTE = root->simple_rte_array[childRTindex];
184                 childrel = root->simple_rel_array[childRTindex];
185
186                 /*
187                  * If set_append_rel_size() decided the parent appendrel was
188                  * parallel-unsafe at some point after visiting this child rel, we
189                  * need to propagate the unsafety marking down to the child, so that
190                  * we don't generate useless partial paths for it.
191                  */
192                 if (!rel->consider_parallel)
193                         childrel->consider_parallel = false;
194
195                 /*
196                  * Compute the child's access paths.
197                  */
198                 set_rel_pathlist(root, childrel, childRTindex, childRTE);
199
200                 /*
201                  * If child is dummy, ignore it.
202                  */
203                 if (IS_DUMMY_REL(childrel))
204                         continue;
205
206                 /*
207                  * Child is live, so add it to the live_childrels list for use below.
208                  */
209                 live_childrels = lappend(live_childrels, childrel);
210
211                 /*
212                  * If child has an unparameterized cheapest-total path, add that to
213                  * the unparameterized Append path we are constructing for the parent.
214                  * If not, there's no workable unparameterized path.
215                  */
216                 if (childrel->cheapest_total_path->param_info == NULL)
217                         subpaths = accumulate_append_subpath(subpaths,
218                                                                                           childrel->cheapest_total_path);
219                 else
220                         subpaths_valid = false;
221
222                 /* Same idea, but for a partial plan. */
223                 if (childrel->partial_pathlist != NIL)
224                         partial_subpaths = accumulate_append_subpath(partial_subpaths,
225                                                                            linitial(childrel->partial_pathlist));
226                 else
227                         partial_subpaths_valid = false;
228
229                 /*
230                  * Collect lists of all the available path orderings and
231                  * parameterizations for all the children.  We use these as a
232                  * heuristic to indicate which sort orderings and parameterizations we
233                  * should build Append and MergeAppend paths for.
234                  */
235                 foreach(lcp, childrel->pathlist)
236                 {
237                         Path       *childpath = (Path *) lfirst(lcp);
238                         List       *childkeys = childpath->pathkeys;
239                         Relids          childouter = PATH_REQ_OUTER(childpath);
240
241                         /* Unsorted paths don't contribute to pathkey list */
242                         if (childkeys != NIL)
243                         {
244                                 ListCell   *lpk;
245                                 bool            found = false;
246
247                                 /* Have we already seen this ordering? */
248                                 foreach(lpk, all_child_pathkeys)
249                                 {
250                                         List       *existing_pathkeys = (List *) lfirst(lpk);
251
252                                         if (compare_pathkeys(existing_pathkeys,
253                                                                                  childkeys) == PATHKEYS_EQUAL)
254                                         {
255                                                 found = true;
256                                                 break;
257                                         }
258                                 }
259                                 if (!found)
260                                 {
261                                         /* No, so add it to all_child_pathkeys */
262                                         all_child_pathkeys = lappend(all_child_pathkeys,
263                                                                                                  childkeys);
264                                 }
265                         }
266
267                         /* Unparameterized paths don't contribute to param-set list */
268                         if (childouter)
269                         {
270                                 ListCell   *lco;
271                                 bool            found = false;
272
273                                 /* Have we already seen this param set? */
274                                 foreach(lco, all_child_outers)
275                                 {
276                                         Relids          existing_outers = (Relids) lfirst(lco);
277
278                                         if (bms_equal(existing_outers, childouter))
279                                         {
280                                                 found = true;
281                                                 break;
282                                         }
283                                 }
284                                 if (!found)
285                                 {
286                                         /* No, so add it to all_child_outers */
287                                         all_child_outers = lappend(all_child_outers,
288                                                                                            childouter);
289                                 }
290                         }
291                 }
292         }
293
294         /*
295          * If we found unparameterized paths for all children, build an unordered,
296          * unparameterized Append path for the rel.  (Note: this is correct even
297          * if we have zero or one live subpath due to constraint exclusion.)
298          */
299         if (subpaths_valid)
300                 add_path(rel, (Path *) create_append_path(rel, subpaths, NULL, 0));
301
302         /*
303          * Consider an append of partial unordered, unparameterized partial paths.
304          */
305         if (partial_subpaths_valid)
306         {
307                 AppendPath *appendpath;
308                 ListCell   *lc;
309                 int                     parallel_workers = 0;
310
311                 /*
312                  * Decide on the number of workers to request for this append path.
313                  * For now, we just use the maximum value from among the members.  It
314                  * might be useful to use a higher number if the Append node were
315                  * smart enough to spread out the workers, but it currently isn't.
316                  */
317                 foreach(lc, partial_subpaths)
318                 {
319                         Path       *path = lfirst(lc);
320
321                         parallel_workers = Max(parallel_workers, path->parallel_workers);
322                 }
323                 Assert(parallel_workers > 0);
324
325                 /* Generate a partial append path. */
326                 appendpath = create_append_path(rel, partial_subpaths, NULL,
327                                                                                 parallel_workers);
328                 add_partial_path(rel, (Path *) appendpath);
329         }
330
331         /*
332          * Also build unparameterized MergeAppend paths based on the collected
333          * list of child pathkeys.
334          */
335         if (subpaths_valid)
336                 generate_mergeappend_paths(root, rel, live_childrels,
337                                                                    all_child_pathkeys);
338
339         /*
340          * Build Append paths for each parameterization seen among the child rels.
341          * (This may look pretty expensive, but in most cases of practical
342          * interest, the child rels will expose mostly the same parameterizations,
343          * so that not that many cases actually get considered here.)
344          *
345          * The Append node itself cannot enforce quals, so all qual checking must
346          * be done in the child paths.  This means that to have a parameterized
347          * Append path, we must have the exact same parameterization for each
348          * child path; otherwise some children might be failing to check the
349          * moved-down quals.  To make them match up, we can try to increase the
350          * parameterization of lesser-parameterized paths.
351          */
352         foreach(l, all_child_outers)
353         {
354                 Relids          required_outer = (Relids) lfirst(l);
355                 ListCell   *lcr;
356
357                 /* Select the child paths for an Append with this parameterization */
358                 subpaths = NIL;
359                 subpaths_valid = true;
360                 foreach(lcr, live_childrels)
361                 {
362                         RelOptInfo *childrel = (RelOptInfo *) lfirst(lcr);
363                         Path       *subpath;
364
365                         subpath = get_cheapest_parameterized_child_path(root,
366                                                                                                                         childrel,
367                                                                                                                         required_outer);
368                         if (subpath == NULL)
369                         {
370                                 /* failed to make a suitable path for this child */
371                                 subpaths_valid = false;
372                                 break;
373                         }
374                         subpaths = accumulate_append_subpath(subpaths, subpath);
375                 }
376
377                 if (subpaths_valid)
378                         add_path(rel, (Path *)
379                                          create_append_path(rel, subpaths, required_outer, 0));
380         }
381 }
382
383 /*
384  * generate_mergeappend_paths
385  *              Generate MergeAppend paths for an append relation
386  *
387  * Generate a path for each ordering (pathkey list) appearing in
388  * all_child_pathkeys.
389  *
390  * We consider both cheapest-startup and cheapest-total cases, ie, for each
391  * interesting ordering, collect all the cheapest startup subpaths and all the
392  * cheapest total paths, and build a MergeAppend path for each case.
393  *
394  * We don't currently generate any parameterized MergeAppend paths.  While
395  * it would not take much more code here to do so, it's very unclear that it
396  * is worth the planning cycles to investigate such paths: there's little
397  * use for an ordered path on the inside of a nestloop.  In fact, it's likely
398  * that the current coding of add_path would reject such paths out of hand,
399  * because add_path gives no credit for sort ordering of parameterized paths,
400  * and a parameterized MergeAppend is going to be more expensive than the
401  * corresponding parameterized Append path.  If we ever try harder to support
402  * parameterized mergejoin plans, it might be worth adding support for
403  * parameterized MergeAppends to feed such joins.  (See notes in
404  * optimizer/README for why that might not ever happen, though.)
405  */
406 static void
407 generate_mergeappend_paths(PlannerInfo *root, RelOptInfo *rel,
408                                                    List *live_childrels,
409                                                    List *all_child_pathkeys)
410 {
411         ListCell   *lcp;
412
413         foreach(lcp, all_child_pathkeys)
414         {
415                 List       *pathkeys = (List *) lfirst(lcp);
416                 List       *startup_subpaths = NIL;
417                 List       *total_subpaths = NIL;
418                 bool            startup_neq_total = false;
419                 ListCell   *lcr;
420
421                 /* Select the child paths for this ordering... */
422                 foreach(lcr, live_childrels)
423                 {
424                         RelOptInfo *childrel = (RelOptInfo *) lfirst(lcr);
425                         Path       *cheapest_startup,
426                                            *cheapest_total;
427
428                         /* Locate the right paths, if they are available. */
429                         cheapest_startup =
430                                 get_cheapest_path_for_pathkeys(childrel->pathlist,
431                                                                                            pathkeys,
432                                                                                            NULL,
433                                                                                            STARTUP_COST);
434                         cheapest_total =
435                                 get_cheapest_path_for_pathkeys(childrel->pathlist,
436                                                                                            pathkeys,
437                                                                                            NULL,
438                                                                                            TOTAL_COST);
439
440                         /*
441                          * If we can't find any paths with the right order just use the
442                          * cheapest-total path; we'll have to sort it later.
443                          */
444                         if (cheapest_startup == NULL || cheapest_total == NULL)
445                         {
446                                 cheapest_startup = cheapest_total =
447                                         childrel->cheapest_total_path;
448                                 /* Assert we do have an unparameterized path for this child */
449                                 Assert(cheapest_total->param_info == NULL);
450                         }
451
452                         /*
453                          * Notice whether we actually have different paths for the
454                          * "cheapest" and "total" cases; frequently there will be no point
455                          * in two create_merge_append_path() calls.
456                          */
457                         if (cheapest_startup != cheapest_total)
458                                 startup_neq_total = true;
459
460                         startup_subpaths =
461                                 accumulate_append_subpath(startup_subpaths, cheapest_startup);
462                         total_subpaths =
463                                 accumulate_append_subpath(total_subpaths, cheapest_total);
464                 }
465
466                 /* ... and build the MergeAppend paths */
467                 add_path(rel, (Path *) create_merge_append_path(root,
468                                                                                                                 rel,
469                                                                                                                 startup_subpaths,
470                                                                                                                 pathkeys,
471                                                                                                                 NULL));
472                 if (startup_neq_total)
473                         add_path(rel, (Path *) create_merge_append_path(root,
474                                                                                                                         rel,
475                                                                                                                         total_subpaths,
476                                                                                                                         pathkeys,
477                                                                                                                         NULL));
478         }
479 }
480
481 /*
482  * get_cheapest_parameterized_child_path
483  *              Get cheapest path for this relation that has exactly the requested
484  *              parameterization.
485  *
486  * Returns NULL if unable to create such a path.
487  */
488 static Path *
489 get_cheapest_parameterized_child_path(PlannerInfo *root, RelOptInfo *rel,
490                                                                           Relids required_outer)
491 {
492         Path       *cheapest;
493         ListCell   *lc;
494
495         /*
496          * Look up the cheapest existing path with no more than the needed
497          * parameterization.  If it has exactly the needed parameterization, we're
498          * done.
499          */
500         cheapest = get_cheapest_path_for_pathkeys(rel->pathlist,
501                                                                                           NIL,
502                                                                                           required_outer,
503                                                                                           TOTAL_COST);
504         Assert(cheapest != NULL);
505         if (bms_equal(PATH_REQ_OUTER(cheapest), required_outer))
506                 return cheapest;
507
508         /*
509          * Otherwise, we can "reparameterize" an existing path to match the given
510          * parameterization, which effectively means pushing down additional
511          * joinquals to be checked within the path's scan.  However, some existing
512          * paths might check the available joinquals already while others don't;
513          * therefore, it's not clear which existing path will be cheapest after
514          * reparameterization.  We have to go through them all and find out.
515          */
516         cheapest = NULL;
517         foreach(lc, rel->pathlist)
518         {
519                 Path       *path = (Path *) lfirst(lc);
520
521                 /* Can't use it if it needs more than requested parameterization */
522                 if (!bms_is_subset(PATH_REQ_OUTER(path), required_outer))
523                         continue;
524
525                 /*
526                  * Reparameterization can only increase the path's cost, so if it's
527                  * already more expensive than the current cheapest, forget it.
528                  */
529                 if (cheapest != NULL &&
530                         compare_path_costs(cheapest, path, TOTAL_COST) <= 0)
531                         continue;
532
533                 /* Reparameterize if needed, then recheck cost */
534                 if (!bms_equal(PATH_REQ_OUTER(path), required_outer))
535                 {
536                         path = reparameterize_path(root, path, required_outer, 1.0);
537                         if (path == NULL)
538                                 continue;               /* failed to reparameterize this one */
539                         Assert(bms_equal(PATH_REQ_OUTER(path), required_outer));
540
541                         if (cheapest != NULL &&
542                                 compare_path_costs(cheapest, path, TOTAL_COST) <= 0)
543                                 continue;
544                 }
545
546                 /* We have a new best path */
547                 cheapest = path;
548         }
549
550         /* Return the best path, or NULL if we found no suitable candidate */
551         return cheapest;
552 }
553
554 /*
555  * accumulate_append_subpath
556  *              Add a subpath to the list being built for an Append or MergeAppend
557  *
558  * It's possible that the child is itself an Append or MergeAppend path, in
559  * which case we can "cut out the middleman" and just add its child paths to
560  * our own list.  (We don't try to do this earlier because we need to apply
561  * both levels of transformation to the quals.)
562  *
563  * Note that if we omit a child MergeAppend in this way, we are effectively
564  * omitting a sort step, which seems fine: if the parent is to be an Append,
565  * its result would be unsorted anyway, while if the parent is to be a
566  * MergeAppend, there's no point in a separate sort on a child.
567  */
568 static List *
569 accumulate_append_subpath(List *subpaths, Path *path)
570 {
571         if (IsA(path, AppendPath))
572         {
573                 AppendPath *apath = (AppendPath *) path;
574
575                 /* list_copy is important here to avoid sharing list substructure */
576                 return list_concat(subpaths, list_copy(apath->subpaths));
577         }
578         else if (IsA(path, MergeAppendPath))
579         {
580                 MergeAppendPath *mpath = (MergeAppendPath *) path;
581
582                 /* list_copy is important here to avoid sharing list substructure */
583                 return list_concat(subpaths, list_copy(mpath->subpaths));
584         }
585         else
586                 return lappend(subpaths, path);
587 }
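
/*
 * Illustrative note: given an existing AppendPath over {p1, p2}, calling
 * accumulate_append_subpath(subpaths, appendpath) concatenates p1 and p2
 * themselves rather than appending the AppendPath node, so nested
 * Append/MergeAppend structures are flattened into a single level as the
 * parent path is built.
 */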
588
589 /*
590  * standard_join_search
591  *        Find possible joinpaths for a query by successively finding ways
592  *        to join component relations into join relations.
593  *
594  * 'levels_needed' is the number of iterations needed, ie, the number of
595  *              independent jointree items in the query.  This is > 1.
596  *
597  * 'initial_rels' is a list of RelOptInfo nodes for each independent
598  *              jointree item.  These are the components to be joined together.
599  *              Note that levels_needed == list_length(initial_rels).
600  *
601  * Returns the final level of join relations, i.e., the relation that is
602  * the result of joining all the original relations together.
603  * At least one implementation path must be provided for this relation and
604  * all required sub-relations.
605  *
606  * To support loadable plugins that modify planner behavior by changing the
607  * join searching algorithm, we provide a hook variable that lets a plugin
608  * replace or supplement this function.  Any such hook must return the same
609  * final join relation as the standard code would, but it might have a
610  * different set of implementation paths attached, and only the sub-joinrels
611  * needed for these paths need have been instantiated.
612  *
613  * Note to plugin authors: the functions invoked during standard_join_search()
614  * modify root->join_rel_list and root->join_rel_hash.  If you want to do more
615  * than one join-order search, you'll probably need to save and restore the
616  * original states of those data structures.  See geqo_eval() for an example.
617  */
618 RelOptInfo *
619 standard_join_search(PlannerInfo *root, int levels_needed, List *initial_rels)
620 {
621         int                     lev;
622         RelOptInfo *rel;
623
624         /*
625          * This function cannot be invoked recursively within any one planning
626          * problem, so join_rel_level[] can't be in use already.
627          */
628         Assert(root->join_rel_level == NULL);
629
630         /*
631          * We employ a simple "dynamic programming" algorithm: we first find all
632          * ways to build joins of two jointree items, then all ways to build joins
633          * of three items (from two-item joins and single items), then four-item
634          * joins, and so on until we have considered all ways to join all the
635          * items into one rel.
636          *
637          * root->join_rel_level[j] is a list of all the j-item rels.  Initially we
638          * set root->join_rel_level[1] to represent all the single-jointree-item
639          * relations.
640          */
641         root->join_rel_level = (List **) palloc0((levels_needed + 1) * sizeof(List *));
642
643         root->join_rel_level[1] = initial_rels;
644
645         for (lev = 2; lev <= levels_needed; lev++)
646         {
647                 ListCell   *lc;
648
649                 /*
650                  * Determine all possible pairs of relations to be joined at this
651                  * level, and build paths for making each one from every available
652                  * pair of lower-level relations.
653                  */
654                 join_search_one_level(root, lev);
655
656                 /*
657                  * Run generate_gather_paths() for each just-processed joinrel.  We
658                  * could not do this earlier because both regular and partial paths
659                  * can get added to a particular joinrel at multiple times within
660                  * join_search_one_level.  After that, we're done creating paths for
661                  * the joinrel, so run set_cheapest().
662                  */
663                 foreach(lc, root->join_rel_level[lev])
664                 {
665                         rel = (RelOptInfo *) lfirst(lc);
666
667                         /* Create GatherPaths for any useful partial paths for rel */
668                         generate_gather_paths(root, rel);
669
670                         /* Find and save the cheapest paths for this rel */
671                         set_cheapest(rel);
672
673 #ifdef OPTIMIZER_DEBUG
674                         debug_print_rel(root, rel);
675 #endif
676                 }
677         }
678
679         /*
680          * We should have a single rel at the final level.
681          */
682         if (root->join_rel_level[levels_needed] == NIL)
683                 elog(ERROR, "failed to build any %d-way joins", levels_needed);
684         Assert(list_length(root->join_rel_level[levels_needed]) == 1);
685
686         rel = (RelOptInfo *) linitial(root->join_rel_level[levels_needed]);
687
688         root->join_rel_level = NULL;
689
690         return rel;
691 }
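
/*
 * Worked example (illustrative only): for initial_rels = {A, B, C},
 * join_rel_level[1] = {A, B, C}.  Level 2 builds two-way joinrels for the
 * pairs connected by join clauses or join-order restrictions (falling back
 * to cartesian products for rels with neither); level 3 then joins each of
 * those with the remaining base rel.  Every level-3 attempt targets the same
 * three-way RelOptInfo, which accumulates paths from all construction orders
 * before set_cheapest() is run on it.
 */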
692
693 /*
694  * join_search_one_level
695  *        Consider ways to produce join relations containing exactly 'level'
696  *        jointree items.  (This is one step of the dynamic-programming method
697  *        embodied in standard_join_search.)  Join rel nodes for each feasible
698  *        combination of lower-level rels are created and returned in a list.
699  *        Implementation paths are created for each such joinrel, too.
700  *
701  * level: level of rels we want to make this time
702  * root->join_rel_level[j], 1 <= j < level, is a list of rels containing j items
703  *
704  * The result is returned in root->join_rel_level[level].
705  */
706 void
707 join_search_one_level(PlannerInfo *root, int level)
708 {
709         List      **joinrels = root->join_rel_level;
710         ListCell   *r;
711         int                     k;
712
713         Assert(joinrels[level] == NIL);
714
715         /* Set join_cur_level so that new joinrels are added to proper list */
716         root->join_cur_level = level;
717
718         /*
719          * First, consider left-sided and right-sided plans, in which rels of
720          * exactly level-1 member relations are joined against initial relations.
721          * We prefer to join using join clauses, but if we find a rel of level-1
722          * members that has no join clauses, we will generate Cartesian-product
723          * joins against all initial rels not already contained in it.
724          */
725         foreach(r, joinrels[level - 1])
726         {
727                 RelOptInfo *old_rel = (RelOptInfo *) lfirst(r);
728
729                 if (old_rel->joininfo != NIL || old_rel->has_eclass_joins ||
730                         has_join_restriction(root, old_rel))
731                 {
732                         /*
733                          * There are join clauses or join order restrictions relevant to
734                          * this rel, so consider joins between this rel and (only) those
735                          * initial rels it is linked to by a clause or restriction.
736                          *
737                          * At level 2 this condition is symmetric, so there is no need to
738                          * look at initial rels before this one in the list; we already
739                          * considered such joins when we were at the earlier rel.  (The
740                          * mirror-image joins are handled automatically by make_join_rel.)
741                          * In later passes (level > 2), we join rels of the previous level
742                          * to each initial rel they don't already include but have a join
743                          * clause or restriction with.
744                          */
745                         ListCell   *other_rels;
746
747                         if (level == 2)         /* consider remaining initial rels */
748                                 other_rels = lnext(r);
749                         else    /* consider all initial rels */
750                                 other_rels = list_head(joinrels[1]);
751
752                         make_rels_by_clause_joins(root,
753                                                                           old_rel,
754                                                                           other_rels);
755                 }
756                 else
757                 {
758                         /*
759                          * Oops, we have a relation that is not joined to any other
760                          * relation, either directly or by join-order restrictions.
761                          * Cartesian product time.
762                          *
763                          * We consider a cartesian product with each not-already-included
764                          * initial rel, whether it has other join clauses or not.  At
765                          * level 2, if there are two or more clauseless initial rels, we
766                          * will redundantly consider joining them in both directions; but
767                          * such cases aren't common enough to justify adding complexity to
768                          * avoid the duplicated effort.
769                          */
770                         make_rels_by_clauseless_joins(root,
771                                                                                   old_rel,
772                                                                                   list_head(joinrels[1]));
773                 }
774         }
775
776         /*
777          * Now, consider "bushy plans" in which relations of k initial rels are
778          * joined to relations of level-k initial rels, for 2 <= k <= level-2.
779          *
780          * We only consider bushy-plan joins for pairs of rels where there is a
781          * suitable join clause (or join order restriction), in order to avoid
782          * unreasonable growth of planning time.
783          */
784         for (k = 2;; k++)
785         {
786                 int                     other_level = level - k;
787
788                 /*
789                  * Since make_join_rel(x, y) handles both x,y and y,x cases, we only
790                  * need to go as far as the halfway point.
791                  */
792                 if (k > other_level)
793                         break;
794
795                 foreach(r, joinrels[k])
796                 {
797                         RelOptInfo *old_rel = (RelOptInfo *) lfirst(r);
798                         ListCell   *other_rels;
799                         ListCell   *r2;
800
801                         /*
802                          * We can ignore relations without join clauses here, unless they
803                          * participate in join-order restrictions --- then we might have
804                          * to force a bushy join plan.
805                          */
806                         if (old_rel->joininfo == NIL && !old_rel->has_eclass_joins &&
807                                 !has_join_restriction(root, old_rel))
808                                 continue;
809
810                         if (k == other_level)
811                                 other_rels = lnext(r);  /* only consider remaining rels */
812                         else
813                                 other_rels = list_head(joinrels[other_level]);
814
815                         for_each_cell(r2, other_rels)
816                         {
817                                 RelOptInfo *new_rel = (RelOptInfo *) lfirst(r2);
818
819                                 if (!bms_overlap(old_rel->relids, new_rel->relids))
820                                 {
821                                         /*
822                                          * OK, we can build a rel of the right level from this
823                                          * pair of rels.  Do so if there is at least one relevant
824                                          * join clause or join order restriction.
825                                          */
826                                         if (have_relevant_joinclause(root, old_rel, new_rel) ||
827                                                 have_join_order_restriction(root, old_rel, new_rel))
828                                         {
829                                                 (void) make_join_rel(root, old_rel, new_rel);
830                                         }
831                                 }
832                         }
833                 }
834         }
835
836         /*----------
837          * Last-ditch effort: if we failed to find any usable joins so far, force
838          * a set of cartesian-product joins to be generated.  This handles the
839          * special case where all the available rels have join clauses but we
840          * cannot use any of those clauses yet.  This can only happen when we are
841          * considering a join sub-problem (a sub-joinlist) and all the rels in the
842          * sub-problem have only join clauses with rels outside the sub-problem.
843          * An example is
844          *
845          *              SELECT ... FROM a INNER JOIN b ON TRUE, c, d, ...
846          *              WHERE a.w = c.x and b.y = d.z;
847          *
848          * If the "a INNER JOIN b" sub-problem does not get flattened into the
849          * upper level, we must be willing to make a cartesian join of a and b;
850          * but the code above will not have done so, because it thought that both
851          * a and b have joinclauses.  We consider only left-sided and right-sided
852          * cartesian joins in this case (no bushy).
853          *----------
854          */
855         if (joinrels[level] == NIL)
856         {
857                 /*
858                  * This loop is just like the first one, except we always call
859                  * make_rels_by_clauseless_joins().
860                  */
861                 foreach(r, joinrels[level - 1])
862                 {
863                         RelOptInfo *old_rel = (RelOptInfo *) lfirst(r);
864
865                         make_rels_by_clauseless_joins(root,
866                                                                                   old_rel,
867                                                                                   list_head(joinrels[1]));
868                 }
869
870                 /*----------
871                  * When special joins are involved, there may be no legal way
872                  * to make an N-way join for some values of N.  For example consider
873                  *
874                  * SELECT ... FROM t1 WHERE
875                  *       x IN (SELECT ... FROM t2,t3 WHERE ...) AND
876                  *       y IN (SELECT ... FROM t4,t5 WHERE ...)
877                  *
878                  * We will flatten this query to a 5-way join problem, but there are
879                  * no 4-way joins that join_is_legal() will consider legal.  We have
880                  * to accept failure at level 4 and go on to discover a workable
881                  * bushy plan at level 5.
882                  *
883                  * However, if there are no special joins and no lateral references
884                  * then join_is_legal() should never fail, and so the following sanity
885                  * check is useful.
886                  *----------
887                  */
888                 if (joinrels[level] == NIL &&
889                         root->join_info_list == NIL &&
890                         !root->hasLateralRTEs)
891                         elog(ERROR, "failed to build any %d-way joins", level);
892         }
893 }
894
895 /*
896  * make_rels_by_clause_joins
897  *        Build joins between the given relation 'old_rel' and other relations
898  *        that participate in join clauses that 'old_rel' also participates in
899  *        (or participate in join-order restrictions with it).
900  *        The join rels are returned in root->join_rel_level[join_cur_level].
901  *
902  * Note: at levels above 2 we will generate the same joined relation in
903  * multiple ways --- for example (a join b) join c is the same RelOptInfo as
904  * (b join c) join a, though the second case will add a different set of Paths
905  * to it.  This is the reason for using the join_rel_level mechanism, which
906  * automatically ensures that each new joinrel is only added to the list once.
907  *
908  * 'old_rel' is the relation entry for the relation to be joined
909  * 'other_rels': the first cell in a linked list containing the other
910  * rels to be considered for joining
911  *
912  * Currently, this is only used with initial rels in other_rels, but it
913  * will work for joining to joinrels too.
914  */
915 static void
916 make_rels_by_clause_joins(PlannerInfo *root,
917                                                   RelOptInfo *old_rel,
918                                                   ListCell *other_rels)
919 {
920         ListCell   *l;
921
922         for_each_cell(l, other_rels)
923         {
924                 RelOptInfo *other_rel = (RelOptInfo *) lfirst(l);
925
926                 if (!bms_overlap(old_rel->relids, other_rel->relids) &&
927                         (have_relevant_joinclause(root, old_rel, other_rel) ||
928                          have_join_order_restriction(root, old_rel, other_rel)))
929                 {
930                         (void) make_join_rel(root, old_rel, other_rel);
931                 }
932         }
933 }
934
935 /*
936  * make_rels_by_clauseless_joins
937  *        Given a relation 'old_rel' and a list of other relations
938  *        'other_rels', create a join relation between 'old_rel' and each
939  *        member of 'other_rels' that isn't already included in 'old_rel'.
940  *        The join rels are returned in root->join_rel_level[join_cur_level].
941  *
942  * 'old_rel' is the relation entry for the relation to be joined
943  * 'other_rels': the first cell of a linked list containing the
944  * other rels to be considered for joining
945  *
946  * Currently, this is only used with initial rels in other_rels, but it would
947  * work for joining to joinrels too.
948  */
949 static void
950 make_rels_by_clauseless_joins(PlannerInfo *root,
951                                                           RelOptInfo *old_rel,
952                                                           ListCell *other_rels)
953 {
954         ListCell   *l;
955
956         for_each_cell(l, other_rels)
957         {
958                 RelOptInfo *other_rel = (RelOptInfo *) lfirst(l);
959
960                 if (!bms_overlap(other_rel->relids, old_rel->relids))
961                 {
962                         (void) make_join_rel(root, old_rel, other_rel);
963                 }
964         }
965 }
966
967 /*
968  * join_is_legal
969  *         Determine whether a proposed join is legal given the query's
970  *         join order constraints; and if it is, determine the join type.
971  *
972  * Caller must supply not only the two rels, but the union of their relids.
973  * (We could simplify the API by computing joinrelids locally, but this
974  * would be redundant work in the normal path through make_join_rel.)
975  *
976  * On success, *sjinfo_p is set to NULL if this is to be a plain inner join,
977  * else it's set to point to the associated SpecialJoinInfo node.  Also,
978  * *reversed_p is set TRUE if the given relations need to be swapped to
979  * match the SpecialJoinInfo node.
980  */
981 static bool
982 join_is_legal(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2,
983                           Relids joinrelids,
984                           SpecialJoinInfo **sjinfo_p, bool *reversed_p)
985 {
986         SpecialJoinInfo *match_sjinfo;
987         bool            reversed;
988         bool            unique_ified;
989         bool            must_be_leftjoin;
990         ListCell   *l;
991
992         /*
993          * Ensure output params are set on failure return.  This is just to
994          * suppress uninitialized-variable warnings from overly anal compilers.
995          */
996         *sjinfo_p = NULL;
997         *reversed_p = false;
998
999         /*
1000          * If we have any special joins, the proposed join might be illegal; and
1001          * in any case we have to determine its join type.  Scan the join info
1002          * list for matches and conflicts.
1003          */
1004         match_sjinfo = NULL;
1005         reversed = false;
1006         unique_ified = false;
1007         must_be_leftjoin = false;
1008
1009         foreach(l, root->join_info_list)
1010         {
1011                 SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) lfirst(l);
1012
1013                 /*
1014                  * This special join is not relevant unless its RHS overlaps the
1015                  * proposed join.  (Check this first as a fast path for dismissing
1016                  * most irrelevant SJs quickly.)
1017                  */
1018                 if (!bms_overlap(sjinfo->min_righthand, joinrelids))
1019                         continue;
1020
1021                 /*
1022                  * Also, not relevant if proposed join is fully contained within RHS
1023                  * (ie, we're still building up the RHS).
1024                  */
1025                 if (bms_is_subset(joinrelids, sjinfo->min_righthand))
1026                         continue;
1027
1028                 /*
1029                  * Also, not relevant if SJ is already done within either input.
1030                  */
1031                 if (bms_is_subset(sjinfo->min_lefthand, rel1->relids) &&
1032                         bms_is_subset(sjinfo->min_righthand, rel1->relids))
1033                         continue;
1034                 if (bms_is_subset(sjinfo->min_lefthand, rel2->relids) &&
1035                         bms_is_subset(sjinfo->min_righthand, rel2->relids))
1036                         continue;
1037
1038                 /*
1039                  * If it's a semijoin and we already joined the RHS to any other rels
1040                  * within either input, then we must have unique-ified the RHS at that
1041                  * point (see below).  Therefore the semijoin is no longer relevant in
1042                  * this join path.
1043                  */
1044                 if (sjinfo->jointype == JOIN_SEMI)
1045                 {
1046                         if (bms_is_subset(sjinfo->syn_righthand, rel1->relids) &&
1047                                 !bms_equal(sjinfo->syn_righthand, rel1->relids))
1048                                 continue;
1049                         if (bms_is_subset(sjinfo->syn_righthand, rel2->relids) &&
1050                                 !bms_equal(sjinfo->syn_righthand, rel2->relids))
1051                                 continue;
1052                 }
1053
1054                 /*
1055                  * If one input contains min_lefthand and the other contains
1056                  * min_righthand, then we can perform the SJ at this join.
1057                  *
1058                  * Reject if we get matches to more than one SJ; that implies we're
1059                  * considering something that's not really valid.
1060                  */
1061                 if (bms_is_subset(sjinfo->min_lefthand, rel1->relids) &&
1062                         bms_is_subset(sjinfo->min_righthand, rel2->relids))
1063                 {
1064                         if (match_sjinfo)
1065                                 return false;   /* invalid join path */
1066                         match_sjinfo = sjinfo;
1067                         reversed = false;
1068                 }
1069                 else if (bms_is_subset(sjinfo->min_lefthand, rel2->relids) &&
1070                                  bms_is_subset(sjinfo->min_righthand, rel1->relids))
1071                 {
1072                         if (match_sjinfo)
1073                                 return false;   /* invalid join path */
1074                         match_sjinfo = sjinfo;
1075                         reversed = true;
1076                 }
1077                 else if (sjinfo->jointype == JOIN_SEMI &&
1078                                  bms_equal(sjinfo->syn_righthand, rel2->relids) &&
1079                                  create_unique_path(root, rel2, rel2->cheapest_total_path,
1080                                                                         sjinfo) != NULL)
1081                 {
1082                         /*----------
1083                          * For a semijoin, we can join the RHS to anything else by
1084                          * unique-ifying the RHS (if the RHS can be unique-ified).
1085                          * We will only get here if we have the full RHS but less
1086                          * than min_lefthand on the LHS.
1087                          *
1088                          * The reason to consider such a join path is exemplified by
1089                          *      SELECT ... FROM a,b WHERE (a.x,b.y) IN (SELECT c1,c2 FROM c)
1090                          * If we insist on doing this as a semijoin we will first have
1091                          * to form the cartesian product of A*B.  But if we unique-ify
1092                          * C then the semijoin becomes a plain innerjoin and we can join
1093                          * in any order, eg C to A and then to B.  When C is much smaller
1094                          * than A and B this can be a huge win.  So we allow C to be
1095                          * joined to just A or just B here, and then make_join_rel has
1096                          * to handle the case properly.
1097                          *
1098                          * Note that actually we'll allow unique-ified C to be joined to
1099                          * some other relation D here, too.  That is legal, if usually not
1100                          * very sane, and this routine is only concerned with legality not
1101                          * with whether the join is good strategy.
1102                          *----------
1103                          */
1104                         if (match_sjinfo)
1105                                 return false;   /* invalid join path */
1106                         match_sjinfo = sjinfo;
1107                         reversed = false;
1108                         unique_ified = true;
1109                 }
1110                 else if (sjinfo->jointype == JOIN_SEMI &&
1111                                  bms_equal(sjinfo->syn_righthand, rel1->relids) &&
1112                                  create_unique_path(root, rel1, rel1->cheapest_total_path,
1113                                                                         sjinfo) != NULL)
1114                 {
1115                         /* Reversed semijoin case */
1116                         if (match_sjinfo)
1117                                 return false;   /* invalid join path */
1118                         match_sjinfo = sjinfo;
1119                         reversed = true;
1120                         unique_ified = true;
1121                 }
1122                 else
1123                 {
1124                         /*
1125                          * Otherwise, the proposed join overlaps the RHS but isn't a valid
1126                          * implementation of this SJ.  But don't panic quite yet: the RHS
1127                          * violation might have occurred previously, in one or both input
1128                          * relations, in which case we must have previously decided that
1129                          * it was OK to commute some other SJ with this one.  If we need
1130                          * to perform this join to finish building up the RHS, rejecting
1131                          * it could lead to not finding any plan at all.  (This can occur
1132                          * because of the heuristics elsewhere in this file that postpone
1133                          * clauseless joins: we might not consider doing a clauseless join
1134                          * within the RHS until after we've performed other, validly
1135                          * commutable SJs with one or both sides of the clauseless join.)
1136                          * This consideration boils down to the rule that if both inputs
1137                          * overlap the RHS, we can allow the join --- they are either
1138                          * fully within the RHS, or represent previously-allowed joins to
1139                          * rels outside it.
1140                          */
1141                         if (bms_overlap(rel1->relids, sjinfo->min_righthand) &&
1142                                 bms_overlap(rel2->relids, sjinfo->min_righthand))
1143                                 continue;               /* assume valid previous violation of RHS */
1144
1145                         /*
1146                          * The proposed join could still be legal, but only if we're
1147                          * allowed to associate it into the RHS of this SJ.  That means
1148                          * this SJ must be a LEFT join (not SEMI or ANTI, and certainly
1149                          * not FULL) and the proposed join must not overlap the LHS.
1150                          */
1151                         if (sjinfo->jointype != JOIN_LEFT ||
1152                                 bms_overlap(joinrelids, sjinfo->min_lefthand))
1153                                 return false;   /* invalid join path */
1154
1155                         /*
1156                          * To be valid, the proposed join must be a LEFT join; otherwise
1157                          * it can't associate into this SJ's RHS.  But we may not yet have
1158                          * found the SpecialJoinInfo matching the proposed join, so we
1159                          * can't test that yet.  Remember the requirement for later.
1160                          */
1161                         must_be_leftjoin = true;
1162                 }
1163         }
1164
1165         /*
1166          * Fail if we violated any SJ's RHS and didn't match to a LEFT SJ: the
1167          * proposed join can't associate into an SJ's RHS.
1168          *
1169          * Also, fail if the proposed join's predicate isn't strict; we're
1170          * essentially checking to see if we can apply outer-join identity 3, and
1171          * that's a requirement.  (This check may be redundant with checks in
1172          * make_outerjoininfo, but I'm not quite sure, and it's cheap to test.)
1173          */
1174         if (must_be_leftjoin &&
1175                 (match_sjinfo == NULL ||
1176                  match_sjinfo->jointype != JOIN_LEFT ||
1177                  !match_sjinfo->lhs_strict))
1178                 return false;                   /* invalid join path */
1179
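        /*
         * Editor's note (illustration only, not part of the PostgreSQL
         * sources): "outer-join identity 3" is the left-join associativity
         * rule documented in src/backend/optimizer/README,
         *
         *      (A leftjoin B on (Pab)) leftjoin C on (Pbc)
         *          = A leftjoin (B leftjoin C on (Pbc)) on (Pab)
         *
         * which holds only when Pbc is strict, i.e. cannot succeed for a
         * null-extended B row.  The lhs_strict test just above corresponds
         * to that strictness requirement.
         */
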
1180         /*
1181          * We also have to check for constraints imposed by LATERAL references.
1182          */
1183         if (root->hasLateralRTEs)
1184         {
1185                 bool            lateral_fwd;
1186                 bool            lateral_rev;
1187                 Relids          join_lateral_rels;
1188
1189                 /*
1190                  * The proposed rels could each contain lateral references to the
1191                  * other, in which case the join is impossible.  If there are lateral
1192                  * references in just one direction, then the join has to be done with
1193                  * a nestloop with the lateral referencer on the inside.  If the join
1194                  * matches an SJ that cannot be implemented by such a nestloop, the
1195                  * join is impossible.
1196                  *
1197                  * Also, if the lateral reference is only indirect, we should reject
1198                  * the join; whatever rel(s) the reference chain goes through must be
1199                  * joined to first.
1200                  *
1201                  * Another case that might keep us from building a valid plan is the
1202                  * implementation restriction described by have_dangerous_phv().
1203                  */
1204                 lateral_fwd = bms_overlap(rel1->relids, rel2->lateral_relids);
1205                 lateral_rev = bms_overlap(rel2->relids, rel1->lateral_relids);
1206                 if (lateral_fwd && lateral_rev)
1207                         return false;           /* have lateral refs in both directions */
1208                 if (lateral_fwd)
1209                 {
1210                         /* has to be implemented as nestloop with rel1 on left */
1211                         if (match_sjinfo &&
1212                                 (reversed ||
1213                                  unique_ified ||
1214                                  match_sjinfo->jointype == JOIN_FULL))
1215                                 return false;   /* not implementable as nestloop */
1216                         /* check there is a direct reference from rel2 to rel1 */
1217                         if (!bms_overlap(rel1->relids, rel2->direct_lateral_relids))
1218                                 return false;   /* only indirect refs, so reject */
1219                         /* check we won't have a dangerous PHV */
1220                         if (have_dangerous_phv(root, rel1->relids, rel2->lateral_relids))
1221                                 return false;   /* might be unable to handle required PHV */
1222                 }
1223                 else if (lateral_rev)
1224                 {
1225                         /* has to be implemented as nestloop with rel2 on left */
1226                         if (match_sjinfo &&
1227                                 (!reversed ||
1228                                  unique_ified ||
1229                                  match_sjinfo->jointype == JOIN_FULL))
1230                                 return false;   /* not implementable as nestloop */
1231                         /* check there is a direct reference from rel1 to rel2 */
1232                         if (!bms_overlap(rel2->relids, rel1->direct_lateral_relids))
1233                                 return false;   /* only indirect refs, so reject */
1234                         /* check we won't have a dangerous PHV */
1235                         if (have_dangerous_phv(root, rel2->relids, rel1->lateral_relids))
1236                                 return false;   /* might be unable to handle required PHV */
1237                 }
1238
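        /*
         * Editor's note (illustration only, not part of the PostgreSQL
         * sources; tables a and b are hypothetical).  A one-directional
         * lateral reference such as
         *
         *      SELECT ... FROM a,
         *          LATERAL (SELECT * FROM b WHERE b.y = a.x) ss
         *
         * makes ss depend on a, so a join of a and ss can only be done as a
         * nestloop with a on the outer side; the checks above reject the
         * join if a matched special join would make that impossible.
         */
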
1239                 /*
1240                  * LATERAL references could also cause problems later on if we accept
1241                  * this join: if the join's minimum parameterization includes any rels
1242                  * that would have to be on the inside of an outer join with this join
1243                  * rel, then it's never going to be possible to build the complete
1244                  * query using this join.  We should reject this join not only because
1245                  * it'll save work, but because if we don't, the clauseless-join
1246                  * heuristics might think that legality of this join means that some
1247                  * other join rel need not be formed, and that could lead to failure
1248                  * to find any plan at all.  We have to consider not only rels that
1249                  * are directly on the inner side of an OJ with the joinrel, but also
1250                  * ones that are indirectly so, so search to find all such rels.
1251                  */
1252                 join_lateral_rels = min_join_parameterization(root, joinrelids,
1253                                                                                                           rel1, rel2);
1254                 if (join_lateral_rels)
1255                 {
1256                         Relids          join_plus_rhs = bms_copy(joinrelids);
1257                         bool            more;
1258
1259                         do
1260                         {
1261                                 more = false;
1262                                 foreach(l, root->join_info_list)
1263                                 {
1264                                         SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) lfirst(l);
1265
1266                                         if (bms_overlap(sjinfo->min_lefthand, join_plus_rhs) &&
1267                                                 !bms_is_subset(sjinfo->min_righthand, join_plus_rhs))
1268                                         {
1269                                                 join_plus_rhs = bms_add_members(join_plus_rhs,
1270                                                                                                           sjinfo->min_righthand);
1271                                                 more = true;
1272                                         }
1273                                         /* full joins constrain both sides symmetrically */
1274                                         if (sjinfo->jointype == JOIN_FULL &&
1275                                                 bms_overlap(sjinfo->min_righthand, join_plus_rhs) &&
1276                                                 !bms_is_subset(sjinfo->min_lefthand, join_plus_rhs))
1277                                         {
1278                                                 join_plus_rhs = bms_add_members(join_plus_rhs,
1279                                                                                                                 sjinfo->min_lefthand);
1280                                                 more = true;
1281                                         }
1282                                 }
1283                         } while (more);
1284                         if (bms_overlap(join_plus_rhs, join_lateral_rels))
1285                                 return false;   /* will not be able to join to some RHS rel */
1286                 }
1287         }
1288
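        /*
         * Editor's note (illustration only, not part of the PostgreSQL
         * sources; tables a, b and c are hypothetical).  An example of the
         * rejection just above:
         *
         *      SELECT ... FROM a LEFT JOIN b ON a.x = b.x,
         *          LATERAL (SELECT * FROM c WHERE c.y = b.y) ss
         *
         * A direct join of a to ss would need b as a lateral parameter, but
         * b is inside the RHS of the outer join with a, so it would have to
         * be joined into this join rel and could never be supplied from
         * outside; the a/ss join is therefore rejected.  The query remains
         * plannable by joining a to b first and then nest-looping to ss.
         */
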
1289         /* Otherwise, it's a valid join */
1290         *sjinfo_p = match_sjinfo;
1291         *reversed_p = reversed;
1292         return true;
1293 }
1294
1295 /*
1296  * has_join_restriction
1297  *              Detect whether the specified relation has join-order restrictions,
1298  *              due to being inside an outer join or an IN (sub-SELECT),
1299  *              or participating in any LATERAL references or multi-rel PHVs.
1300  *
1301  * Essentially, this tests whether have_join_order_restriction() could
1302  * succeed with this rel and some other one.  It's OK if we sometimes
1303  * say "true" incorrectly.  (Therefore, we don't bother with the relatively
1304  * expensive has_legal_joinclause test.)
1305  */
1306 static bool
1307 has_join_restriction(PlannerInfo *root, RelOptInfo *rel)
1308 {
1309         ListCell   *l;
1310
1311         if (rel->lateral_relids != NULL || rel->lateral_referencers != NULL)
1312                 return true;
1313
1314         foreach(l, root->placeholder_list)
1315         {
1316                 PlaceHolderInfo *phinfo = (PlaceHolderInfo *) lfirst(l);
1317
1318                 if (bms_is_subset(rel->relids, phinfo->ph_eval_at) &&
1319                         !bms_equal(rel->relids, phinfo->ph_eval_at))
1320                         return true;
1321         }
1322
1323         foreach(l, root->join_info_list)
1324         {
1325                 SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) lfirst(l);
1326
1327                 /* ignore full joins --- other mechanisms preserve their ordering */
1328                 if (sjinfo->jointype == JOIN_FULL)
1329                         continue;
1330
1331                 /* ignore if SJ is already contained in rel */
1332                 if (bms_is_subset(sjinfo->min_lefthand, rel->relids) &&
1333                         bms_is_subset(sjinfo->min_righthand, rel->relids))
1334                         continue;
1335
1336                 /* restricted if it overlaps LHS or RHS, but doesn't contain SJ */
1337                 if (bms_overlap(sjinfo->min_lefthand, rel->relids) ||
1338                         bms_overlap(sjinfo->min_righthand, rel->relids))
1339                         return true;
1340         }
1341
1342         return false;
1343 }
1344
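/*
 * Editor's note (illustration only, not part of the PostgreSQL sources;
 * tables a, b and c are hypothetical).  In
 *
 *      SELECT ... FROM a LEFT JOIN b ON a.x = b.x, c
 *
 * both a and b participate in the outer join, so has_join_restriction()
 * returns true for them, while the freely reorderable rel c has no
 * restriction (assuming it has no LATERAL references or placeholders).
 */
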
1345 /*
1346  * is_dummy_rel --- has relation been proven empty?
1347  */
1348 static bool
1349 is_dummy_rel(RelOptInfo *rel)
1350 {
1351         return IS_DUMMY_REL(rel);
1352 }
1353
1354 /*
1355  * Mark a relation as proven empty.
1356  *
1357  * During GEQO planning, this can get invoked more than once on the same
1358  * baserel struct, so it's worth checking to see if the rel is already marked
1359  * dummy.
1360  *
1361  * Also, when called during GEQO join planning, we are in a short-lived
1362  * memory context.  We must make sure that the dummy path attached to a
1363  * baserel survives the GEQO cycle, else the baserel is trashed for future
1364  * GEQO cycles.  On the other hand, when we are marking a joinrel during GEQO,
1365  * we don't want the dummy path to clutter the main planning context.  Upshot
1366  * is that the best solution is to explicitly make the dummy path in the same
1367  * context the given RelOptInfo is in.
1368  */
1369 static void
1370 mark_dummy_rel(RelOptInfo *rel)
1371 {
1372         MemoryContext oldcontext;
1373
1374         /* Already marked? */
1375         if (is_dummy_rel(rel))
1376                 return;
1377
1378         /* No, so choose correct context to make the dummy path in */
1379         oldcontext = MemoryContextSwitchTo(GetMemoryChunkContext(rel));
1380
1381         /* Set dummy size estimate */
1382         rel->rows = 0;
1383
1384         /* Evict any previously chosen paths */
1385         rel->pathlist = NIL;
1386         rel->partial_pathlist = NIL;
1387
1388         /* Set up the dummy path */
1389         add_path(rel, (Path *) create_append_path(rel, NIL, NULL, 0));
1390
1391         /* Set or update cheapest_total_path and related fields */
1392         set_cheapest(rel);
1393
1394         MemoryContextSwitchTo(oldcontext);
1395 }
1396
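/*
 * Editor's note (illustration only, not part of the PostgreSQL sources;
 * tables a and b are hypothetical).  make_join_rel() ends up here when a
 * join is provably empty, e.g. an inner join one of whose inputs is already
 * dummy or whose join clause has been reduced to constant FALSE:
 *
 *      SELECT ... FROM a JOIN b ON false
 *
 * The joinrel then carries a single childless Append path and a zero row
 * estimate, so no further planning effort is spent on it.
 */
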
1397 /*
1398  * restriction_is_constant_false --- is a restrictlist just false?
1399  *
1400  * In cases where a qual is provably constant false, eval_const_expressions
1401  * will generally have thrown away anything that's ANDed with it.  In outer
1402  * join situations this will leave us computing cartesian products only to
1403  * decide there's no match for an outer row, which is pretty stupid.  So,
1404  * we need to detect the case.
1405  *
1406  * If only_pushed_down is true, then consider only quals that are pushed-down
1407  * from the point of view of the joinrel.
1408  */
1409 static bool
1410 restriction_is_constant_false(List *restrictlist,
1411                                                           RelOptInfo *joinrel,
1412                                                           bool only_pushed_down)
1413 {
1414         ListCell   *lc;
1415
1416         /*
1417          * Despite the above comment, the restriction list we see here might
1418          * possibly have other members besides the FALSE constant, since other
1419          * quals could get "pushed down" to the outer join level.  So we check
1420          * each member of the list.
1421          */
1422         foreach(lc, restrictlist)
1423         {
1424                 RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
1425
1426                 Assert(IsA(rinfo, RestrictInfo));
1427                 if (only_pushed_down && !RINFO_IS_PUSHED_DOWN(rinfo, joinrel->relids))
1428                         continue;
1429
1430                 if (rinfo->clause && IsA(rinfo->clause, Const))
1431                 {
1432                         Const      *con = (Const *) rinfo->clause;
1433
1434                         /* constant NULL is as good as constant FALSE for our purposes */
1435                         if (con->constisnull)
1436                                 return true;
1437                         if (!DatumGetBool(con->constvalue))
1438                                 return true;
1439                 }
1440         }
1441         return false;
1442 }
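
/*
 * Editor's note (illustration only, not part of the PostgreSQL sources;
 * tables a and b are hypothetical).  In
 *
 *      SELECT ... FROM a LEFT JOIN b ON (b.x = 1 AND false)
 *
 * eval_const_expressions reduces the join clause to constant FALSE.
 * Detecting that here lets make_join_rel() mark the nullable side dummy and
 * simply null-extend a's rows, rather than building a cartesian product only
 * to find that no rows match.
 */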