1 /*-------------------------------------------------------------------------
2  *
3  * core.c
4  *        Routines copied from PostgreSQL core distribution.
5  *
6  * The main purpose of this file is to give access to static functions in core.
7  * Another purpose is to tweak function behavior by replacing parts of them with
8  * macro definitions. See the end of pg_hint_plan.c for details. In any case,
9  * this file *must* contain the required functions copied without any change.
10  *
11  * This file contains the following functions from corresponding files.
12  *
13  * src/backend/optimizer/path/allpaths.c
14  *
15  *      static functions:
16  *     set_plain_rel_pathlist()
17  *     create_plain_partial_paths()
18  *     set_append_rel_pathlist()
19  *     generate_mergeappend_paths()
20  *     get_cheapest_parameterized_child_path()
21  *     accumulate_append_subpath()
22  *
23  *  public functions:
24  *     standard_join_search(): This function is not static. The reason for
25  *        including it is make_rels_by_clause_joins: in order to avoid
26  *        generating apparently unwanted join combinations, we decided to
27  *        change the behavior of make_join_rel, which is called under this
28  *        function.
29  *
30  * src/backend/optimizer/path/joinrels.c
31  *
32  *      public functions:
33  *     join_search_one_level(): We have to modify this to call our own
34  *                  definition of make_rels_by_clause_joins.
35  *
36  *      static functions:
37  *     make_rels_by_clause_joins()
38  *     make_rels_by_clauseless_joins()
39  *     join_is_legal()
40  *     has_join_restriction()
41  *     is_dummy_rel()
42  *     mark_dummy_rel()
43  *     restriction_is_constant_false()
44  *
45  *
46  * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
47  * Portions Copyright (c) 1994, Regents of the University of California
48  *
49  *-------------------------------------------------------------------------
50  */
51
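/*
 * Illustrative sketch only (not part of the copied PostgreSQL code): the
 * hinting module redirects selected calls made by the functions below by
 * defining macros before textually including this file, roughly along these
 * lines (the wrapper name here is hypothetical; see the end of pg_hint_plan.c
 * for the actual definitions):
 *
 *     #define make_join_rel(root, rel1, rel2) \
 *             pg_hint_plan_make_join_rel((root), (rel1), (rel2))
 *     #include "core.c"
 *     #undef make_join_rel
 *
 * With such a macro in place, every call to make_join_rel() in the copied
 * functions resolves to the hint-aware wrapper, while the copied code itself
 * stays identical to the upstream sources.
 */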
52
53 /*
54  * set_plain_rel_pathlist
55  *        Build access paths for a plain relation (no subquery, no inheritance)
56  */
57 static void
58 set_plain_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
59 {
60         Relids          required_outer;
61
62         /*
63          * We don't support pushing join clauses into the quals of a seqscan, but
64          * it could still have required parameterization due to LATERAL refs in
65          * its tlist.
66          */
67         required_outer = rel->lateral_relids;
68
69         /* Consider sequential scan */
70         add_path(rel, create_seqscan_path(root, rel, required_outer, 0));
71
72         /* If appropriate, consider parallel sequential scan */
73         if (rel->consider_parallel && required_outer == NULL)
74                 create_plain_partial_paths(root, rel);
75
76         /* Consider index scans */
77         create_index_paths(root, rel);
78
79         /* Consider TID scans */
80         create_tidscan_paths(root, rel);
81 }
82
83 /*
84  * create_plain_partial_paths
85  *        Build partial access paths for parallel scan of a plain relation
86  */
87 static void
88 create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel)
89 {
90         int                     parallel_workers;
91
92         /*
93          * If the user has set the parallel_workers reloption, use that; otherwise
94          * select a default number of workers.
95          */
96         if (rel->rel_parallel_workers != -1)
97                 parallel_workers = rel->rel_parallel_workers;
98         else
99         {
100                 int                     parallel_threshold;
101
102                 /*
103                  * If this relation is too small to be worth a parallel scan, just
104                  * return without doing anything ... unless it's an inheritance child.
105                  * In that case, we want to generate a parallel path here anyway.  It
106                  * might not be worthwhile just for this relation, but when combined
107                  * with all of its inheritance siblings it may well pay off.
108                  */
109                 if (rel->pages < (BlockNumber) min_parallel_relation_size &&
110                         rel->reloptkind == RELOPT_BASEREL)
111                         return;
112
113                 /*
114                  * Select the number of workers based on the log of the size of the
115                  * relation.  This probably needs to be a good deal more
116                  * sophisticated, but we need something here for now.  Note that the
117                  * upper limit of the min_parallel_relation_size GUC is chosen to
118                  * prevent overflow here.
119                  */
120                 parallel_workers = 1;
121                 parallel_threshold = Max(min_parallel_relation_size, 1);
122                 while (rel->pages >= (BlockNumber) (parallel_threshold * 3))
123                 {
124                         parallel_workers++;
125                         parallel_threshold *= 3;
126                         if (parallel_threshold > INT_MAX / 3)
127                                 break;                  /* avoid overflow */
128                 }
129         }
130
131         /*
132          * In no case use more than max_parallel_workers_per_gather workers.
133          */
134         parallel_workers = Min(parallel_workers, max_parallel_workers_per_gather);
135
136         /* If any limit was set to zero, the user doesn't want a parallel scan. */
137         if (parallel_workers <= 0)
138                 return;
139
140         /* Add an unordered partial path based on a parallel sequential scan. */
141         add_partial_path(rel, create_seqscan_path(root, rel, NULL, parallel_workers));
142 }
143
144 /*
145  * set_append_rel_pathlist
146  *        Build access paths for an "append relation"
147  */
148 static void
149 set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
150                                                 Index rti, RangeTblEntry *rte)
151 {
152         int                     parentRTindex = rti;
153         List       *live_childrels = NIL;
154         List       *subpaths = NIL;
155         bool            subpaths_valid = true;
156         List       *partial_subpaths = NIL;
157         bool            partial_subpaths_valid = true;
158         List       *all_child_pathkeys = NIL;
159         List       *all_child_outers = NIL;
160         ListCell   *l;
161
162         /*
163          * Generate access paths for each member relation, and remember the
164          * cheapest path for each one.  Also, identify all pathkeys (orderings)
165          * and parameterizations (required_outer sets) available for the member
166          * relations.
167          */
168         foreach(l, root->append_rel_list)
169         {
170                 AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(l);
171                 int                     childRTindex;
172                 RangeTblEntry *childRTE;
173                 RelOptInfo *childrel;
174                 ListCell   *lcp;
175
176                 /* append_rel_list contains all append rels; ignore others */
177                 if (appinfo->parent_relid != parentRTindex)
178                         continue;
179
180                 /* Re-locate the child RTE and RelOptInfo */
181                 childRTindex = appinfo->child_relid;
182                 childRTE = root->simple_rte_array[childRTindex];
183                 childrel = root->simple_rel_array[childRTindex];
184
185                 /*
186                  * If set_append_rel_size() decided the parent appendrel was
187                  * parallel-unsafe at some point after visiting this child rel, we
188                  * need to propagate the unsafety marking down to the child, so that
189                  * we don't generate useless partial paths for it.
190                  */
191                 if (!rel->consider_parallel)
192                         childrel->consider_parallel = false;
193
194                 /*
195                  * Compute the child's access paths.
196                  */
197                 set_rel_pathlist(root, childrel, childRTindex, childRTE);
198
199                 /*
200                  * If child is dummy, ignore it.
201                  */
202                 if (IS_DUMMY_REL(childrel))
203                         continue;
204
205                 /*
206                  * Child is live, so add it to the live_childrels list for use below.
207                  */
208                 live_childrels = lappend(live_childrels, childrel);
209
210                 /*
211                  * If child has an unparameterized cheapest-total path, add that to
212                  * the unparameterized Append path we are constructing for the parent.
213                  * If not, there's no workable unparameterized path.
214                  */
215                 if (childrel->cheapest_total_path->param_info == NULL)
216                         subpaths = accumulate_append_subpath(subpaths,
217                                                                                           childrel->cheapest_total_path);
218                 else
219                         subpaths_valid = false;
220
221                 /* Same idea, but for a partial plan. */
222                 if (childrel->partial_pathlist != NIL)
223                         partial_subpaths = accumulate_append_subpath(partial_subpaths,
224                                                                            linitial(childrel->partial_pathlist));
225                 else
226                         partial_subpaths_valid = false;
227
228                 /*
229                  * Collect lists of all the available path orderings and
230                  * parameterizations for all the children.  We use these as a
231                  * heuristic to indicate which sort orderings and parameterizations we
232                  * should build Append and MergeAppend paths for.
233                  */
234                 foreach(lcp, childrel->pathlist)
235                 {
236                         Path       *childpath = (Path *) lfirst(lcp);
237                         List       *childkeys = childpath->pathkeys;
238                         Relids          childouter = PATH_REQ_OUTER(childpath);
239
240                         /* Unsorted paths don't contribute to pathkey list */
241                         if (childkeys != NIL)
242                         {
243                                 ListCell   *lpk;
244                                 bool            found = false;
245
246                                 /* Have we already seen this ordering? */
247                                 foreach(lpk, all_child_pathkeys)
248                                 {
249                                         List       *existing_pathkeys = (List *) lfirst(lpk);
250
251                                         if (compare_pathkeys(existing_pathkeys,
252                                                                                  childkeys) == PATHKEYS_EQUAL)
253                                         {
254                                                 found = true;
255                                                 break;
256                                         }
257                                 }
258                                 if (!found)
259                                 {
260                                         /* No, so add it to all_child_pathkeys */
261                                         all_child_pathkeys = lappend(all_child_pathkeys,
262                                                                                                  childkeys);
263                                 }
264                         }
265
266                         /* Unparameterized paths don't contribute to param-set list */
267                         if (childouter)
268                         {
269                                 ListCell   *lco;
270                                 bool            found = false;
271
272                                 /* Have we already seen this param set? */
273                                 foreach(lco, all_child_outers)
274                                 {
275                                         Relids          existing_outers = (Relids) lfirst(lco);
276
277                                         if (bms_equal(existing_outers, childouter))
278                                         {
279                                                 found = true;
280                                                 break;
281                                         }
282                                 }
283                                 if (!found)
284                                 {
285                                         /* No, so add it to all_child_outers */
286                                         all_child_outers = lappend(all_child_outers,
287                                                                                            childouter);
288                                 }
289                         }
290                 }
291         }
292
293         /*
294          * If we found unparameterized paths for all children, build an unordered,
295          * unparameterized Append path for the rel.  (Note: this is correct even
296          * if we have zero or one live subpath due to constraint exclusion.)
297          */
298         if (subpaths_valid)
299                 add_path(rel, (Path *) create_append_path(rel, subpaths, NULL, 0));
300
301         /*
302          * Consider an append of unordered, unparameterized partial paths.
303          */
304         if (partial_subpaths_valid)
305         {
306                 AppendPath *appendpath;
307                 ListCell   *lc;
308                 int                     parallel_workers = 0;
309
310                 /*
311                  * Decide on the number of workers to request for this append path.
312                  * For now, we just use the maximum value from among the members.  It
313                  * might be useful to use a higher number if the Append node were
314                  * smart enough to spread out the workers, but it currently isn't.
315                  */
316                 foreach(lc, partial_subpaths)
317                 {
318                         Path       *path = lfirst(lc);
319
320                         parallel_workers = Max(parallel_workers, path->parallel_workers);
321                 }
322                 Assert(parallel_workers > 0);
323
324                 /* Generate a partial append path. */
325                 appendpath = create_append_path(rel, partial_subpaths, NULL,
326                                                                                 parallel_workers);
327                 add_partial_path(rel, (Path *) appendpath);
328         }
329
330         /*
331          * Also build unparameterized MergeAppend paths based on the collected
332          * list of child pathkeys.
333          */
334         if (subpaths_valid)
335                 generate_mergeappend_paths(root, rel, live_childrels,
336                                                                    all_child_pathkeys);
337
338         /*
339          * Build Append paths for each parameterization seen among the child rels.
340          * (This may look pretty expensive, but in most cases of practical
341          * interest, the child rels will expose mostly the same parameterizations,
342          * so that not that many cases actually get considered here.)
343          *
344          * The Append node itself cannot enforce quals, so all qual checking must
345          * be done in the child paths.  This means that to have a parameterized
346          * Append path, we must have the exact same parameterization for each
347          * child path; otherwise some children might be failing to check the
348          * moved-down quals.  To make them match up, we can try to increase the
349          * parameterization of lesser-parameterized paths.
350          */
351         foreach(l, all_child_outers)
352         {
353                 Relids          required_outer = (Relids) lfirst(l);
354                 ListCell   *lcr;
355
356                 /* Select the child paths for an Append with this parameterization */
357                 subpaths = NIL;
358                 subpaths_valid = true;
359                 foreach(lcr, live_childrels)
360                 {
361                         RelOptInfo *childrel = (RelOptInfo *) lfirst(lcr);
362                         Path       *subpath;
363
364                         subpath = get_cheapest_parameterized_child_path(root,
365                                                                                                                         childrel,
366                                                                                                                         required_outer);
367                         if (subpath == NULL)
368                         {
369                                 /* failed to make a suitable path for this child */
370                                 subpaths_valid = false;
371                                 break;
372                         }
373                         subpaths = accumulate_append_subpath(subpaths, subpath);
374                 }
375
376                 if (subpaths_valid)
377                         add_path(rel, (Path *)
378                                          create_append_path(rel, subpaths, required_outer, 0));
379         }
380 }
381
382 /*
383  * generate_mergeappend_paths
384  *              Generate MergeAppend paths for an append relation
385  *
386  * Generate a path for each ordering (pathkey list) appearing in
387  * all_child_pathkeys.
388  *
389  * We consider both cheapest-startup and cheapest-total cases, ie, for each
390  * interesting ordering, collect all the cheapest startup subpaths and all the
391  * cheapest total paths, and build a MergeAppend path for each case.
392  *
393  * We don't currently generate any parameterized MergeAppend paths.  While
394  * it would not take much more code here to do so, it's very unclear that it
395  * is worth the planning cycles to investigate such paths: there's little
396  * use for an ordered path on the inside of a nestloop.  In fact, it's likely
397  * that the current coding of add_path would reject such paths out of hand,
398  * because add_path gives no credit for sort ordering of parameterized paths,
399  * and a parameterized MergeAppend is going to be more expensive than the
400  * corresponding parameterized Append path.  If we ever try harder to support
401  * parameterized mergejoin plans, it might be worth adding support for
402  * parameterized MergeAppends to feed such joins.  (See notes in
403  * optimizer/README for why that might not ever happen, though.)
404  */
405 static void
406 generate_mergeappend_paths(PlannerInfo *root, RelOptInfo *rel,
407                                                    List *live_childrels,
408                                                    List *all_child_pathkeys)
409 {
410         ListCell   *lcp;
411
412         foreach(lcp, all_child_pathkeys)
413         {
414                 List       *pathkeys = (List *) lfirst(lcp);
415                 List       *startup_subpaths = NIL;
416                 List       *total_subpaths = NIL;
417                 bool            startup_neq_total = false;
418                 ListCell   *lcr;
419
420                 /* Select the child paths for this ordering... */
421                 foreach(lcr, live_childrels)
422                 {
423                         RelOptInfo *childrel = (RelOptInfo *) lfirst(lcr);
424                         Path       *cheapest_startup,
425                                            *cheapest_total;
426
427                         /* Locate the right paths, if they are available. */
428                         cheapest_startup =
429                                 get_cheapest_path_for_pathkeys(childrel->pathlist,
430                                                                                            pathkeys,
431                                                                                            NULL,
432                                                                                            STARTUP_COST);
433                         cheapest_total =
434                                 get_cheapest_path_for_pathkeys(childrel->pathlist,
435                                                                                            pathkeys,
436                                                                                            NULL,
437                                                                                            TOTAL_COST);
438
439                         /*
440                          * If we can't find any paths with the right order just use the
441                          * cheapest-total path; we'll have to sort it later.
442                          */
443                         if (cheapest_startup == NULL || cheapest_total == NULL)
444                         {
445                                 cheapest_startup = cheapest_total =
446                                         childrel->cheapest_total_path;
447                                 /* Assert we do have an unparameterized path for this child */
448                                 Assert(cheapest_total->param_info == NULL);
449                         }
450
451                         /*
452                          * Notice whether we actually have different paths for the
453                          * "cheapest" and "total" cases; frequently there will be no point
454                          * in two create_merge_append_path() calls.
455                          */
456                         if (cheapest_startup != cheapest_total)
457                                 startup_neq_total = true;
458
459                         startup_subpaths =
460                                 accumulate_append_subpath(startup_subpaths, cheapest_startup);
461                         total_subpaths =
462                                 accumulate_append_subpath(total_subpaths, cheapest_total);
463                 }
464
465                 /* ... and build the MergeAppend paths */
466                 add_path(rel, (Path *) create_merge_append_path(root,
467                                                                                                                 rel,
468                                                                                                                 startup_subpaths,
469                                                                                                                 pathkeys,
470                                                                                                                 NULL));
471                 if (startup_neq_total)
472                         add_path(rel, (Path *) create_merge_append_path(root,
473                                                                                                                         rel,
474                                                                                                                         total_subpaths,
475                                                                                                                         pathkeys,
476                                                                                                                         NULL));
477         }
478 }
479
480 /*
481  * get_cheapest_parameterized_child_path
482  *              Get cheapest path for this relation that has exactly the requested
483  *              parameterization.
484  *
485  * Returns NULL if unable to create such a path.
486  */
487 static Path *
488 get_cheapest_parameterized_child_path(PlannerInfo *root, RelOptInfo *rel,
489                                                                           Relids required_outer)
490 {
491         Path       *cheapest;
492         ListCell   *lc;
493
494         /*
495          * Look up the cheapest existing path with no more than the needed
496          * parameterization.  If it has exactly the needed parameterization, we're
497          * done.
498          */
499         cheapest = get_cheapest_path_for_pathkeys(rel->pathlist,
500                                                                                           NIL,
501                                                                                           required_outer,
502                                                                                           TOTAL_COST);
503         Assert(cheapest != NULL);
504         if (bms_equal(PATH_REQ_OUTER(cheapest), required_outer))
505                 return cheapest;
506
507         /*
508          * Otherwise, we can "reparameterize" an existing path to match the given
509          * parameterization, which effectively means pushing down additional
510          * joinquals to be checked within the path's scan.  However, some existing
511          * paths might check the available joinquals already while others don't;
512          * therefore, it's not clear which existing path will be cheapest after
513          * reparameterization.  We have to go through them all and find out.
514          */
515         cheapest = NULL;
516         foreach(lc, rel->pathlist)
517         {
518                 Path       *path = (Path *) lfirst(lc);
519
520                 /* Can't use it if it needs more than requested parameterization */
521                 if (!bms_is_subset(PATH_REQ_OUTER(path), required_outer))
522                         continue;
523
524                 /*
525                  * Reparameterization can only increase the path's cost, so if it's
526                  * already more expensive than the current cheapest, forget it.
527                  */
528                 if (cheapest != NULL &&
529                         compare_path_costs(cheapest, path, TOTAL_COST) <= 0)
530                         continue;
531
532                 /* Reparameterize if needed, then recheck cost */
533                 if (!bms_equal(PATH_REQ_OUTER(path), required_outer))
534                 {
535                         path = reparameterize_path(root, path, required_outer, 1.0);
536                         if (path == NULL)
537                                 continue;               /* failed to reparameterize this one */
538                         Assert(bms_equal(PATH_REQ_OUTER(path), required_outer));
539
540                         if (cheapest != NULL &&
541                                 compare_path_costs(cheapest, path, TOTAL_COST) <= 0)
542                                 continue;
543                 }
544
545                 /* We have a new best path */
546                 cheapest = path;
547         }
548
549         /* Return the best path, or NULL if we found no suitable candidate */
550         return cheapest;
551 }
552
553 /*
554  * accumulate_append_subpath
555  *              Add a subpath to the list being built for an Append or MergeAppend
556  *
557  * It's possible that the child is itself an Append or MergeAppend path, in
558  * which case we can "cut out the middleman" and just add its child paths to
559  * our own list.  (We don't try to do this earlier because we need to apply
560  * both levels of transformation to the quals.)
561  *
562  * Note that if we omit a child MergeAppend in this way, we are effectively
563  * omitting a sort step, which seems fine: if the parent is to be an Append,
564  * its result would be unsorted anyway, while if the parent is to be a
565  * MergeAppend, there's no point in a separate sort on a child.
566  */
567 static List *
568 accumulate_append_subpath(List *subpaths, Path *path)
569 {
570         if (IsA(path, AppendPath))
571         {
572                 AppendPath *apath = (AppendPath *) path;
573
574                 /* list_copy is important here to avoid sharing list substructure */
575                 return list_concat(subpaths, list_copy(apath->subpaths));
576         }
577         else if (IsA(path, MergeAppendPath))
578         {
579                 MergeAppendPath *mpath = (MergeAppendPath *) path;
580
581                 /* list_copy is important here to avoid sharing list substructure */
582                 return list_concat(subpaths, list_copy(mpath->subpaths));
583         }
584         else
585                 return lappend(subpaths, path);
586 }
587
588 /*
589  * standard_join_search
590  *        Find possible joinpaths for a query by successively finding ways
591  *        to join component relations into join relations.
592  *
593  * 'levels_needed' is the number of iterations needed, ie, the number of
594  *              independent jointree items in the query.  This is > 1.
595  *
596  * 'initial_rels' is a list of RelOptInfo nodes for each independent
597  *              jointree item.  These are the components to be joined together.
598  *              Note that levels_needed == list_length(initial_rels).
599  *
600  * Returns the final level of join relations, i.e., the relation that is
601  * the result of joining all the original relations together.
602  * At least one implementation path must be provided for this relation and
603  * all required sub-relations.
604  *
605  * To support loadable plugins that modify planner behavior by changing the
606  * join searching algorithm, we provide a hook variable that lets a plugin
607  * replace or supplement this function.  Any such hook must return the same
608  * final join relation as the standard code would, but it might have a
609  * different set of implementation paths attached, and only the sub-joinrels
610  * needed for these paths need have been instantiated.
611  *
612  * Note to plugin authors: the functions invoked during standard_join_search()
613  * modify root->join_rel_list and root->join_rel_hash.  If you want to do more
614  * than one join-order search, you'll probably need to save and restore the
615  * original states of those data structures.  See geqo_eval() for an example.
616  */
617 RelOptInfo *
618 standard_join_search(PlannerInfo *root, int levels_needed, List *initial_rels)
619 {
620         int                     lev;
621         RelOptInfo *rel;
622
623         /*
624          * This function cannot be invoked recursively within any one planning
625          * problem, so join_rel_level[] can't be in use already.
626          */
627         Assert(root->join_rel_level == NULL);
628
629         /*
630          * We employ a simple "dynamic programming" algorithm: we first find all
631          * ways to build joins of two jointree items, then all ways to build joins
632          * of three items (from two-item joins and single items), then four-item
633          * joins, and so on until we have considered all ways to join all the
634          * items into one rel.
635          *
636          * root->join_rel_level[j] is a list of all the j-item rels.  Initially we
637          * set root->join_rel_level[1] to represent all the single-jointree-item
638          * relations.
639          */
640         root->join_rel_level = (List **) palloc0((levels_needed + 1) * sizeof(List *));
641
642         root->join_rel_level[1] = initial_rels;
643
644         for (lev = 2; lev <= levels_needed; lev++)
645         {
646                 ListCell   *lc;
647
648                 /*
649                  * Determine all possible pairs of relations to be joined at this
650                  * level, and build paths for making each one from every available
651                  * pair of lower-level relations.
652                  */
653                 join_search_one_level(root, lev);
654
655                 /*
656                  * Run generate_gather_paths() for each just-processed joinrel.  We
657                  * could not do this earlier because both regular and partial paths
658                  * can get added to a particular joinrel at multiple times within
659                  * join_search_one_level.  After that, we're done creating paths for
660                  * the joinrel, so run set_cheapest().
661                  */
662                 foreach(lc, root->join_rel_level[lev])
663                 {
664                         rel = (RelOptInfo *) lfirst(lc);
665
666                         /* Create GatherPaths for any useful partial paths for rel */
667                         generate_gather_paths(root, rel);
668
669                         /* Find and save the cheapest paths for this rel */
670                         set_cheapest(rel);
671
672 #ifdef OPTIMIZER_DEBUG
673                         debug_print_rel(root, rel);
674 #endif
675                 }
676         }
677
678         /*
679          * We should have a single rel at the final level.
680          */
681         if (root->join_rel_level[levels_needed] == NIL)
682                 elog(ERROR, "failed to build any %d-way joins", levels_needed);
683         Assert(list_length(root->join_rel_level[levels_needed]) == 1);
684
685         rel = (RelOptInfo *) linitial(root->join_rel_level[levels_needed]);
686
687         root->join_rel_level = NULL;
688
689         return rel;
690 }
691
692 /*
693  * join_search_one_level
694  *        Consider ways to produce join relations containing exactly 'level'
695  *        jointree items.  (This is one step of the dynamic-programming method
696  *        embodied in standard_join_search.)  Join rel nodes for each feasible
697  *        combination of lower-level rels are created and returned in a list.
698  *        Implementation paths are created for each such joinrel, too.
699  *
700  * level: level of rels we want to make this time
701  * root->join_rel_level[j], 1 <= j < level, is a list of rels containing j items
702  *
703  * The result is returned in root->join_rel_level[level].
704  */
705 void
706 join_search_one_level(PlannerInfo *root, int level)
707 {
708         List      **joinrels = root->join_rel_level;
709         ListCell   *r;
710         int                     k;
711
712         Assert(joinrels[level] == NIL);
713
714         /* Set join_cur_level so that new joinrels are added to proper list */
715         root->join_cur_level = level;
716
717         /*
718          * First, consider left-sided and right-sided plans, in which rels of
719          * exactly level-1 member relations are joined against initial relations.
720          * We prefer to join using join clauses, but if we find a rel of level-1
721          * members that has no join clauses, we will generate Cartesian-product
722          * joins against all initial rels not already contained in it.
723          */
724         foreach(r, joinrels[level - 1])
725         {
726                 RelOptInfo *old_rel = (RelOptInfo *) lfirst(r);
727
728                 if (old_rel->joininfo != NIL || old_rel->has_eclass_joins ||
729                         has_join_restriction(root, old_rel))
730                 {
731                         /*
732                          * There are join clauses or join order restrictions relevant to
733                          * this rel, so consider joins between this rel and (only) those
734                          * initial rels it is linked to by a clause or restriction.
735                          *
736                          * At level 2 this condition is symmetric, so there is no need to
737                          * look at initial rels before this one in the list; we already
738                          * considered such joins when we were at the earlier rel.  (The
739                          * mirror-image joins are handled automatically by make_join_rel.)
740                          * In later passes (level > 2), we join rels of the previous level
741                          * to each initial rel they don't already include but have a join
742                          * clause or restriction with.
743                          */
744                         ListCell   *other_rels;
745
746                         if (level == 2)         /* consider remaining initial rels */
747                                 other_rels = lnext(r);
748                         else    /* consider all initial rels */
749                                 other_rels = list_head(joinrels[1]);
750
751                         make_rels_by_clause_joins(root,
752                                                                           old_rel,
753                                                                           other_rels);
754                 }
755                 else
756                 {
757                         /*
758                          * Oops, we have a relation that is not joined to any other
759                          * relation, either directly or by join-order restrictions.
760                          * Cartesian product time.
761                          *
762                          * We consider a cartesian product with each not-already-included
763                          * initial rel, whether it has other join clauses or not.  At
764                          * level 2, if there are two or more clauseless initial rels, we
765                          * will redundantly consider joining them in both directions; but
766                          * such cases aren't common enough to justify adding complexity to
767                          * avoid the duplicated effort.
768                          */
769                         make_rels_by_clauseless_joins(root,
770                                                                                   old_rel,
771                                                                                   list_head(joinrels[1]));
772                 }
773         }
774
775         /*
776          * Now, consider "bushy plans" in which relations of k initial rels are
777          * joined to relations of level-k initial rels, for 2 <= k <= level-2.
778          *
779          * We only consider bushy-plan joins for pairs of rels where there is a
780          * suitable join clause (or join order restriction), in order to avoid
781          * unreasonable growth of planning time.
782          */
783         for (k = 2;; k++)
784         {
785                 int                     other_level = level - k;
786
787                 /*
788                  * Since make_join_rel(x, y) handles both x,y and y,x cases, we only
789                  * need to go as far as the halfway point.
790                  */
791                 if (k > other_level)
792                         break;
793
794                 foreach(r, joinrels[k])
795                 {
796                         RelOptInfo *old_rel = (RelOptInfo *) lfirst(r);
797                         ListCell   *other_rels;
798                         ListCell   *r2;
799
800                         /*
801                          * We can ignore relations without join clauses here, unless they
802                          * participate in join-order restrictions --- then we might have
803                          * to force a bushy join plan.
804                          */
805                         if (old_rel->joininfo == NIL && !old_rel->has_eclass_joins &&
806                                 !has_join_restriction(root, old_rel))
807                                 continue;
808
809                         if (k == other_level)
810                                 other_rels = lnext(r);  /* only consider remaining rels */
811                         else
812                                 other_rels = list_head(joinrels[other_level]);
813
814                         for_each_cell(r2, other_rels)
815                         {
816                                 RelOptInfo *new_rel = (RelOptInfo *) lfirst(r2);
817
818                                 if (!bms_overlap(old_rel->relids, new_rel->relids))
819                                 {
820                                         /*
821                                          * OK, we can build a rel of the right level from this
822                                          * pair of rels.  Do so if there is at least one relevant
823                                          * join clause or join order restriction.
824                                          */
825                                         if (have_relevant_joinclause(root, old_rel, new_rel) ||
826                                                 have_join_order_restriction(root, old_rel, new_rel))
827                                         {
828                                                 (void) make_join_rel(root, old_rel, new_rel);
829                                         }
830                                 }
831                         }
832                 }
833         }
834
835         /*----------
836          * Last-ditch effort: if we failed to find any usable joins so far, force
837          * a set of cartesian-product joins to be generated.  This handles the
838          * special case where all the available rels have join clauses but we
839          * cannot use any of those clauses yet.  This can only happen when we are
840          * considering a join sub-problem (a sub-joinlist) and all the rels in the
841          * sub-problem have only join clauses with rels outside the sub-problem.
842          * An example is
843          *
844          *              SELECT ... FROM a INNER JOIN b ON TRUE, c, d, ...
845          *              WHERE a.w = c.x and b.y = d.z;
846          *
847          * If the "a INNER JOIN b" sub-problem does not get flattened into the
848          * upper level, we must be willing to make a cartesian join of a and b;
849          * but the code above will not have done so, because it thought that both
850          * a and b have joinclauses.  We consider only left-sided and right-sided
851          * cartesian joins in this case (no bushy).
852          *----------
853          */
854         if (joinrels[level] == NIL)
855         {
856                 /*
857                  * This loop is just like the first one, except we always call
858                  * make_rels_by_clauseless_joins().
859                  */
860                 foreach(r, joinrels[level - 1])
861                 {
862                         RelOptInfo *old_rel = (RelOptInfo *) lfirst(r);
863
864                         make_rels_by_clauseless_joins(root,
865                                                                                   old_rel,
866                                                                                   list_head(joinrels[1]));
867                 }
868
869                 /*----------
870                  * When special joins are involved, there may be no legal way
871                  * to make an N-way join for some values of N.  For example consider
872                  *
873                  * SELECT ... FROM t1 WHERE
874                  *       x IN (SELECT ... FROM t2,t3 WHERE ...) AND
875                  *       y IN (SELECT ... FROM t4,t5 WHERE ...)
876                  *
877                  * We will flatten this query to a 5-way join problem, but there are
878                  * no 4-way joins that join_is_legal() will consider legal.  We have
879                  * to accept failure at level 4 and go on to discover a workable
880                  * bushy plan at level 5.
881                  *
882                  * However, if there are no special joins and no lateral references
883                  * then join_is_legal() should never fail, and so the following sanity
884                  * check is useful.
885                  *----------
886                  */
887                 if (joinrels[level] == NIL &&
888                         root->join_info_list == NIL &&
889                         !root->hasLateralRTEs)
890                         elog(ERROR, "failed to build any %d-way joins", level);
891         }
892 }
893
894 /*
895  * make_rels_by_clause_joins
896  *        Build joins between the given relation 'old_rel' and other relations
897  *        that participate in join clauses that 'old_rel' also participates in
898  *        (or participate in join-order restrictions with it).
899  *        The join rels are returned in root->join_rel_level[join_cur_level].
900  *
901  * Note: at levels above 2 we will generate the same joined relation in
902  * multiple ways --- for example (a join b) join c is the same RelOptInfo as
903  * (b join c) join a, though the second case will add a different set of Paths
904  * to it.  This is the reason for using the join_rel_level mechanism, which
905  * automatically ensures that each new joinrel is only added to the list once.
906  *
907  * 'old_rel' is the relation entry for the relation to be joined
908  * 'other_rels': the first cell in a linked list containing the other
909  * rels to be considered for joining
910  *
911  * Currently, this is only used with initial rels in other_rels, but it
912  * will work for joining to joinrels too.
913  */
914 static void
915 make_rels_by_clause_joins(PlannerInfo *root,
916                                                   RelOptInfo *old_rel,
917                                                   ListCell *other_rels)
918 {
919         ListCell   *l;
920
921         for_each_cell(l, other_rels)
922         {
923                 RelOptInfo *other_rel = (RelOptInfo *) lfirst(l);
924
925                 if (!bms_overlap(old_rel->relids, other_rel->relids) &&
926                         (have_relevant_joinclause(root, old_rel, other_rel) ||
927                          have_join_order_restriction(root, old_rel, other_rel)))
928                 {
929                         (void) make_join_rel(root, old_rel, other_rel);
930                 }
931         }
932 }
933
934 /*
935  * make_rels_by_clauseless_joins
936  *        Given a relation 'old_rel' and a list of other relations
937  *        'other_rels', create a join relation between 'old_rel' and each
938  *        member of 'other_rels' that isn't already included in 'old_rel'.
939  *        The join rels are returned in root->join_rel_level[join_cur_level].
940  *
941  * 'old_rel' is the relation entry for the relation to be joined
942  * 'other_rels': the first cell of a linked list containing the
943  * other rels to be considered for joining
944  *
945  * Currently, this is only used with initial rels in other_rels, but it would
946  * work for joining to joinrels too.
947  */
948 static void
949 make_rels_by_clauseless_joins(PlannerInfo *root,
950                                                           RelOptInfo *old_rel,
951                                                           ListCell *other_rels)
952 {
953         ListCell   *l;
954
955         for_each_cell(l, other_rels)
956         {
957                 RelOptInfo *other_rel = (RelOptInfo *) lfirst(l);
958
959                 if (!bms_overlap(other_rel->relids, old_rel->relids))
960                 {
961                         (void) make_join_rel(root, old_rel, other_rel);
962                 }
963         }
964 }
965
966 /*
967  * join_is_legal
968  *         Determine whether a proposed join is legal given the query's
969  *         join order constraints; and if it is, determine the join type.
970  *
971  * Caller must supply not only the two rels, but the union of their relids.
972  * (We could simplify the API by computing joinrelids locally, but this
973  * would be redundant work in the normal path through make_join_rel.)
974  *
975  * On success, *sjinfo_p is set to NULL if this is to be a plain inner join,
976  * else it's set to point to the associated SpecialJoinInfo node.  Also,
977  * *reversed_p is set TRUE if the given relations need to be swapped to
978  * match the SpecialJoinInfo node.
979  */
980 static bool
981 join_is_legal(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2,
982                           Relids joinrelids,
983                           SpecialJoinInfo **sjinfo_p, bool *reversed_p)
984 {
985         SpecialJoinInfo *match_sjinfo;
986         bool            reversed;
987         bool            unique_ified;
988         bool            must_be_leftjoin;
989         ListCell   *l;
990
991         /*
992          * Ensure output params are set on failure return.  This is just to
993          * suppress uninitialized-variable warnings from overly anal compilers.
994          */
995         *sjinfo_p = NULL;
996         *reversed_p = false;
997
998         /*
999          * If we have any special joins, the proposed join might be illegal; and
1000          * in any case we have to determine its join type.  Scan the join info
1001          * list for matches and conflicts.
1002          */
1003         match_sjinfo = NULL;
1004         reversed = false;
1005         unique_ified = false;
1006         must_be_leftjoin = false;
1007
1008         foreach(l, root->join_info_list)
1009         {
1010                 SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) lfirst(l);
1011
1012                 /*
1013                  * This special join is not relevant unless its RHS overlaps the
1014                  * proposed join.  (Check this first as a fast path for dismissing
1015                  * most irrelevant SJs quickly.)
1016                  */
1017                 if (!bms_overlap(sjinfo->min_righthand, joinrelids))
1018                         continue;
1019
1020                 /*
1021                  * Also, not relevant if proposed join is fully contained within RHS
1022                  * (ie, we're still building up the RHS).
1023                  */
1024                 if (bms_is_subset(joinrelids, sjinfo->min_righthand))
1025                         continue;
1026
1027                 /*
1028                  * Also, not relevant if SJ is already done within either input.
1029                  */
1030                 if (bms_is_subset(sjinfo->min_lefthand, rel1->relids) &&
1031                         bms_is_subset(sjinfo->min_righthand, rel1->relids))
1032                         continue;
1033                 if (bms_is_subset(sjinfo->min_lefthand, rel2->relids) &&
1034                         bms_is_subset(sjinfo->min_righthand, rel2->relids))
1035                         continue;
1036
1037                 /*
1038                  * If it's a semijoin and we already joined the RHS to any other rels
1039                  * within either input, then we must have unique-ified the RHS at that
1040                  * point (see below).  Therefore the semijoin is no longer relevant in
1041                  * this join path.
1042                  */
1043                 if (sjinfo->jointype == JOIN_SEMI)
1044                 {
1045                         if (bms_is_subset(sjinfo->syn_righthand, rel1->relids) &&
1046                                 !bms_equal(sjinfo->syn_righthand, rel1->relids))
1047                                 continue;
1048                         if (bms_is_subset(sjinfo->syn_righthand, rel2->relids) &&
1049                                 !bms_equal(sjinfo->syn_righthand, rel2->relids))
1050                                 continue;
1051                 }
1052
1053                 /*
1054                  * If one input contains min_lefthand and the other contains
1055                  * min_righthand, then we can perform the SJ at this join.
1056                  *
1057                  * Reject if we get matches to more than one SJ; that implies we're
1058                  * considering something that's not really valid.
1059                  */
1060                 if (bms_is_subset(sjinfo->min_lefthand, rel1->relids) &&
1061                         bms_is_subset(sjinfo->min_righthand, rel2->relids))
1062                 {
1063                         if (match_sjinfo)
1064                                 return false;   /* invalid join path */
1065                         match_sjinfo = sjinfo;
1066                         reversed = false;
1067                 }
1068                 else if (bms_is_subset(sjinfo->min_lefthand, rel2->relids) &&
1069                                  bms_is_subset(sjinfo->min_righthand, rel1->relids))
1070                 {
1071                         if (match_sjinfo)
1072                                 return false;   /* invalid join path */
1073                         match_sjinfo = sjinfo;
1074                         reversed = true;
1075                 }
1076                 else if (sjinfo->jointype == JOIN_SEMI &&
1077                                  bms_equal(sjinfo->syn_righthand, rel2->relids) &&
1078                                  create_unique_path(root, rel2, rel2->cheapest_total_path,
1079                                                                         sjinfo) != NULL)
1080                 {
1081                         /*----------
1082                          * For a semijoin, we can join the RHS to anything else by
1083                          * unique-ifying the RHS (if the RHS can be unique-ified).
1084                          * We will only get here if we have the full RHS but less
1085                          * than min_lefthand on the LHS.
1086                          *
1087                          * The reason to consider such a join path is exemplified by
1088                          *      SELECT ... FROM a,b WHERE (a.x,b.y) IN (SELECT c1,c2 FROM c)
1089                          * If we insist on doing this as a semijoin we will first have
1090                          * to form the cartesian product of A*B.  But if we unique-ify
1091                          * C then the semijoin becomes a plain innerjoin and we can join
1092                          * in any order, eg C to A and then to B.  When C is much smaller
1093                          * than A and B this can be a huge win.  So we allow C to be
1094                          * joined to just A or just B here, and then make_join_rel has
1095                          * to handle the case properly.
1096                          *
1097                          * Note that actually we'll allow unique-ified C to be joined to
1098                          * some other relation D here, too.  That is legal, if usually not
1099                          * very sane, and this routine is only concerned with legality not
1100                          * with whether the join is good strategy.
1101                          *----------
1102                          */
1103                         if (match_sjinfo)
1104                                 return false;   /* invalid join path */
1105                         match_sjinfo = sjinfo;
1106                         reversed = false;
1107                         unique_ified = true;
1108                 }
1109                 else if (sjinfo->jointype == JOIN_SEMI &&
1110                                  bms_equal(sjinfo->syn_righthand, rel1->relids) &&
1111                                  create_unique_path(root, rel1, rel1->cheapest_total_path,
1112                                                                         sjinfo) != NULL)
1113                 {
1114                         /* Reversed semijoin case */
1115                         if (match_sjinfo)
1116                                 return false;   /* invalid join path */
1117                         match_sjinfo = sjinfo;
1118                         reversed = true;
1119                         unique_ified = true;
1120                 }
1121                 else
1122                 {
1123                         /*
1124                          * Otherwise, the proposed join overlaps the RHS but isn't a valid
1125                          * implementation of this SJ.  But don't panic quite yet: the RHS
1126                          * violation might have occurred previously, in one or both input
1127                          * relations, in which case we must have previously decided that
1128                          * it was OK to commute some other SJ with this one.  If we need
1129                          * to perform this join to finish building up the RHS, rejecting
1130                          * it could lead to not finding any plan at all.  (This can occur
1131                          * because of the heuristics elsewhere in this file that postpone
1132                          * clauseless joins: we might not consider doing a clauseless join
1133                          * within the RHS until after we've performed other, validly
1134                          * commutable SJs with one or both sides of the clauseless join.)
1135                          * This consideration boils down to the rule that if both inputs
1136                          * overlap the RHS, we can allow the join --- they are either
1137                          * fully within the RHS, or represent previously-allowed joins to
1138                          * rels outside it.
1139                          */
1140                         if (bms_overlap(rel1->relids, sjinfo->min_righthand) &&
1141                                 bms_overlap(rel2->relids, sjinfo->min_righthand))
1142                                 continue;               /* assume valid previous violation of RHS */
1143
1144                         /*
1145                          * The proposed join could still be legal, but only if we're
1146                          * allowed to associate it into the RHS of this SJ.  That means
1147                          * this SJ must be a LEFT join (not SEMI or ANTI, and certainly
1148                          * not FULL) and the proposed join must not overlap the LHS.
1149                          */
1150                         if (sjinfo->jointype != JOIN_LEFT ||
1151                                 bms_overlap(joinrelids, sjinfo->min_lefthand))
1152                                 return false;   /* invalid join path */
1153
1154                         /*
1155                          * To be valid, the proposed join must be a LEFT join; otherwise
1156                          * it can't associate into this SJ's RHS.  But we may not yet have
1157                          * found the SpecialJoinInfo matching the proposed join, so we
1158                          * can't test that yet.  Remember the requirement for later.
1159                          */
1160                         must_be_leftjoin = true;
1161                 }
1162         }
1163
1164         /*
1165          * Fail if violated any SJ's RHS and didn't match to a LEFT SJ: the
1166          * proposed join can't associate into an SJ's RHS.
1167          *
1168          * Also, fail if the proposed join's predicate isn't strict; we're
1169          * essentially checking to see if we can apply outer-join identity 3, and
1170          * that's a requirement.  (This check may be redundant with checks in
1171          * make_outerjoininfo, but I'm not quite sure, and it's cheap to test.)
1172          */
1173         if (must_be_leftjoin &&
1174                 (match_sjinfo == NULL ||
1175                  match_sjinfo->jointype != JOIN_LEFT ||
1176                  !match_sjinfo->lhs_strict))
1177                 return false;                   /* invalid join path */
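	/*
	 * Editorial note (not part of the PostgreSQL sources): the
	 * "outer-join identity 3" alluded to above is the associativity rule
	 * described in src/backend/optimizer/README,
	 *
	 *		(A leftjoin B on (Pab)) leftjoin C on (Pbc)
	 *		= A leftjoin (B leftjoin C on (Pbc)) on (Pab)
	 *
	 * which holds only if Pbc is strict for at least one column of B, so
	 * that all-null B rows cannot pass it.  A non-strict predicate is
	 * therefore grounds for rejecting the commuted join here.
	 */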
1178
1179         /*
1180          * We also have to check for constraints imposed by LATERAL references.
1181          */
1182         if (root->hasLateralRTEs)
1183         {
1184                 bool            lateral_fwd;
1185                 bool            lateral_rev;
1186                 Relids          join_lateral_rels;
1187
1188                 /*
1189                  * The proposed rels could each contain lateral references to the
1190                  * other, in which case the join is impossible.  If there are lateral
1191                  * references in just one direction, then the join has to be done with
1192                  * a nestloop with the lateral referencer on the inside.  If the join
1193                  * matches an SJ that cannot be implemented by such a nestloop, the
1194                  * join is impossible.
1195                  *
1196                  * Also, if the lateral reference is only indirect, we should reject
1197                  * the join; whatever rel(s) the reference chain goes through must be
1198                  * joined to first.
1199                  *
1200                  * Another case that might keep us from building a valid plan is the
1201                  * implementation restriction described by have_dangerous_phv().
1202                  */
1203                 lateral_fwd = bms_overlap(rel1->relids, rel2->lateral_relids);
1204                 lateral_rev = bms_overlap(rel2->relids, rel1->lateral_relids);
1205                 if (lateral_fwd && lateral_rev)
1206                         return false;           /* have lateral refs in both directions */
1207                 if (lateral_fwd)
1208                 {
1209                         /* has to be implemented as nestloop with rel1 on left */
1210                         if (match_sjinfo &&
1211                                 (reversed ||
1212                                  unique_ified ||
1213                                  match_sjinfo->jointype == JOIN_FULL))
1214                                 return false;   /* not implementable as nestloop */
1215                         /* check there is a direct reference from rel2 to rel1 */
1216                         if (!bms_overlap(rel1->relids, rel2->direct_lateral_relids))
1217                                 return false;   /* only indirect refs, so reject */
1218                         /* check we won't have a dangerous PHV */
1219                         if (have_dangerous_phv(root, rel1->relids, rel2->lateral_relids))
1220                                 return false;   /* might be unable to handle required PHV */
1221                 }
1222                 else if (lateral_rev)
1223                 {
1224                         /* has to be implemented as nestloop with rel2 on left */
1225                         if (match_sjinfo &&
1226                                 (!reversed ||
1227                                  unique_ified ||
1228                                  match_sjinfo->jointype == JOIN_FULL))
1229                                 return false;   /* not implementable as nestloop */
1230                         /* check there is a direct reference from rel1 to rel2 */
1231                         if (!bms_overlap(rel2->relids, rel1->direct_lateral_relids))
1232                                 return false;   /* only indirect refs, so reject */
1233                         /* check we won't have a dangerous PHV */
1234                         if (have_dangerous_phv(root, rel2->relids, rel1->lateral_relids))
1235                                 return false;   /* might be unable to handle required PHV */
1236                 }
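		/*
		 * Editorial illustration (not part of the PostgreSQL sources): a
		 * one-directional lateral reference, e.g.
		 *
		 *		SELECT ... FROM a, LATERAL (SELECT * FROM b WHERE b.y = a.x) ss
		 *
		 * makes ss depend on a, so any join of the two must be a nestloop
		 * with a on the outside supplying a.x to each rescan of ss.  The
		 * checks above reject join shapes (reversed, unique-ified, or FULL)
		 * that cannot be executed that way.
		 */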
1237
1238                 /*
1239                  * LATERAL references could also cause problems later on if we accept
1240                  * this join: if the join's minimum parameterization includes any rels
1241                  * that would have to be on the inside of an outer join with this join
1242                  * rel, then it's never going to be possible to build the complete
1243                  * query using this join.  We should reject this join not only because
1244                  * it'll save work, but because if we don't, the clauseless-join
1245                  * heuristics might think that legality of this join means that some
1246                  * other join rel need not be formed, and that could lead to failure
1247                  * to find any plan at all.  We have to consider not only rels that
1248                  * are directly on the inner side of an OJ with the joinrel, but also
1249                  * ones that are indirectly so, so search to find all such rels.
1250                  */
1251                 join_lateral_rels = min_join_parameterization(root, joinrelids,
1252                                                                                                           rel1, rel2);
1253                 if (join_lateral_rels)
1254                 {
1255                         Relids          join_plus_rhs = bms_copy(joinrelids);
1256                         bool            more;
1257
1258                         do
1259                         {
1260                                 more = false;
1261                                 foreach(l, root->join_info_list)
1262                                 {
1263                                         SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) lfirst(l);
1264
1265                                         /* ignore full joins --- their ordering is predetermined */
1266                                         if (sjinfo->jointype == JOIN_FULL)
1267                                                 continue;
1268
1269                                         if (bms_overlap(sjinfo->min_lefthand, join_plus_rhs) &&
1270                                                 !bms_is_subset(sjinfo->min_righthand, join_plus_rhs))
1271                                         {
1272                                                 join_plus_rhs = bms_add_members(join_plus_rhs,
1273                                                                                                           sjinfo->min_righthand);
1274                                                 more = true;
1275                                         }
1276                                 }
1277                         } while (more);
1278                         if (bms_overlap(join_plus_rhs, join_lateral_rels))
1279                                 return false;   /* will not be able to join to some RHS rel */
1280                 }
1281         }
1282
1283         /* Otherwise, it's a valid join */
1284         *sjinfo_p = match_sjinfo;
1285         *reversed_p = reversed;
1286         return true;
1287 }
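#if 0
/*
 * Editorial sketch (not part of the PostgreSQL sources; kept out of
 * compilation): the fixpoint loop near the end of join_is_legal() that
 * grows join_plus_rhs, restated with plain uint64 bitmasks standing in
 * for Relids and a hypothetical SimpleSJ struct standing in for
 * SpecialJoinInfo.  Starting from the proposed join's relids, it keeps
 * absorbing the whole min_righthand of any non-FULL special join whose
 * min_lefthand already touches the set, until nothing more can be added;
 * the real code then rejects the join if the closed set overlaps the
 * join's minimum lateral parameterization.
 */
typedef struct SimpleSJ
{
	bool		is_full_join;	/* FULL joins are skipped, as in the real loop */
	uint64		min_lefthand;	/* rels that must be on the join's LHS */
	uint64		min_righthand;	/* rels that must be on the join's RHS */
} SimpleSJ;

static uint64
rhs_closure(uint64 joinrelids, const SimpleSJ *sjs, int nsjs)
{
	uint64		join_plus_rhs = joinrelids;
	bool		more;

	do
	{
		more = false;
		for (int i = 0; i < nsjs; i++)
		{
			if (sjs[i].is_full_join)
				continue;
			/* LHS touches the set, but RHS is not yet entirely inside it */
			if ((sjs[i].min_lefthand & join_plus_rhs) != 0 &&
				(sjs[i].min_righthand & ~join_plus_rhs) != 0)
			{
				join_plus_rhs |= sjs[i].min_righthand;
				more = true;
			}
		}
	} while (more);

	return join_plus_rhs;
}
#endif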
1288
1289 /*
1290  * has_join_restriction
1291  *              Detect whether the specified relation has join-order restrictions,
1292  *              due to being inside an outer join or an IN (sub-SELECT),
1293  *              or participating in any LATERAL references or multi-rel PHVs.
1294  *
1295  * Essentially, this tests whether have_join_order_restriction() could
1296  * succeed with this rel and some other one.  It's OK if we sometimes
1297  * say "true" incorrectly.  (Therefore, we don't bother with the relatively
1298  * expensive has_legal_joinclause test.)
1299  */
1300 static bool
1301 has_join_restriction(PlannerInfo *root, RelOptInfo *rel)
1302 {
1303         ListCell   *l;
1304
1305         if (rel->lateral_relids != NULL || rel->lateral_referencers != NULL)
1306                 return true;
1307
1308         foreach(l, root->placeholder_list)
1309         {
1310                 PlaceHolderInfo *phinfo = (PlaceHolderInfo *) lfirst(l);
1311
1312                 if (bms_is_subset(rel->relids, phinfo->ph_eval_at) &&
1313                         !bms_equal(rel->relids, phinfo->ph_eval_at))
1314                         return true;
1315         }
1316
1317         foreach(l, root->join_info_list)
1318         {
1319                 SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) lfirst(l);
1320
1321                 /* ignore full joins --- other mechanisms preserve their ordering */
1322                 if (sjinfo->jointype == JOIN_FULL)
1323                         continue;
1324
1325                 /* ignore if SJ is already contained in rel */
1326                 if (bms_is_subset(sjinfo->min_lefthand, rel->relids) &&
1327                         bms_is_subset(sjinfo->min_righthand, rel->relids))
1328                         continue;
1329
1330                 /* restricted if it overlaps LHS or RHS, but doesn't contain SJ */
1331                 if (bms_overlap(sjinfo->min_lefthand, rel->relids) ||
1332                         bms_overlap(sjinfo->min_righthand, rel->relids))
1333                         return true;
1334         }
1335
1336         return false;
1337 }
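/*
 * Editorial illustration (not part of the PostgreSQL sources): given
 *
 *		SELECT * FROM a LEFT JOIN b ON a.x = b.x, c
 *
 * the left join's SpecialJoinInfo has (roughly) min_lefthand = {a} and
 * min_righthand = {b}, so has_join_restriction() reports true for the
 * baserels a and b (each overlaps the SJ without containing all of it)
 * and false for c.  Rels with LATERAL references or sitting under a
 * multi-rel PlaceHolderVar are caught by the earlier checks in the
 * function, and an IN (sub-SELECT) produces a SEMI-join SpecialJoinInfo
 * handled by the same loop.
 */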
1338
1339 /*
1340  * is_dummy_rel --- has relation been proven empty?
1341  */
1342 static bool
1343 is_dummy_rel(RelOptInfo *rel)
1344 {
1345         return IS_DUMMY_REL(rel);
1346 }
1347
1348 /*
1349  * Mark a relation as proven empty.
1350  *
1351  * During GEQO planning, this can get invoked more than once on the same
1352  * baserel struct, so it's worth checking to see if the rel is already marked
1353  * dummy.
1354  *
1355  * Also, when called during GEQO join planning, we are in a short-lived
1356  * memory context.  We must make sure that the dummy path attached to a
1357  * baserel survives the GEQO cycle, else the baserel is trashed for future
1358  * GEQO cycles.  On the other hand, when we are marking a joinrel during GEQO,
1359  * we don't want the dummy path to clutter the main planning context.  Upshot
1360  * is that the best solution is to explicitly make the dummy path in the same
1361  * context the given RelOptInfo is in.
1362  */
1363 static void
1364 mark_dummy_rel(RelOptInfo *rel)
1365 {
1366         MemoryContext oldcontext;
1367
1368         /* Already marked? */
1369         if (is_dummy_rel(rel))
1370                 return;
1371
1372         /* No, so choose correct context to make the dummy path in */
1373         oldcontext = MemoryContextSwitchTo(GetMemoryChunkContext(rel));
1374
1375         /* Set dummy size estimate */
1376         rel->rows = 0;
1377
1378         /* Evict any previously chosen paths */
1379         rel->pathlist = NIL;
1380         rel->partial_pathlist = NIL;
1381
1382         /* Set up the dummy path */
1383         add_path(rel, (Path *) create_append_path(rel, NIL,
1384                                                                                           rel->lateral_relids,
1385                                                                                           0));
1386
1387         /* Set or update cheapest_total_path and related fields */
1388         set_cheapest(rel);
1389
1390         MemoryContextSwitchTo(oldcontext);
1391 }
1392
1393 /*
1394  * restriction_is_constant_false --- is a restrictlist just false?
1395  *
1396  * In cases where a qual is provably constant false, eval_const_expressions
1397  * will generally have thrown away anything that's ANDed with it.  In outer
1398  * join situations this will leave us computing cartesian products only to
1399  * decide there's no match for an outer row, which is pretty stupid.  So,
1400  * we need to detect the case.
1401  *
1402  * If only_pushed_down is true, then consider only quals that are pushed-down
1403  * from the point of view of the joinrel.
1404  */
1405 static bool
1406 restriction_is_constant_false(List *restrictlist,
1407                                                           RelOptInfo *joinrel,
1408                                                           bool only_pushed_down)
1409 {
1410         ListCell   *lc;
1411
1412         /*
1413          * Despite the above comment, the restriction list we see here might
1414          * possibly have other members besides the FALSE constant, since other
1415          * quals could get "pushed down" to the outer join level.  So we check
1416          * each member of the list.
1417          */
1418         foreach(lc, restrictlist)
1419         {
1420                 RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
1421
1422                 Assert(IsA(rinfo, RestrictInfo));
1423                 if (only_pushed_down && !RINFO_IS_PUSHED_DOWN(rinfo, joinrel->relids))
1424                         continue;
1425
1426                 if (rinfo->clause && IsA(rinfo->clause, Const))
1427                 {
1428                         Const      *con = (Const *) rinfo->clause;
1429
1430                         /* constant NULL is as good as constant FALSE for our purposes */
1431                         if (con->constisnull)
1432                                 return true;
1433                         if (!DatumGetBool(con->constvalue))
1434                                 return true;
1435                 }
1436         }
1437         return false;
1438 }
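/*
 * Editorial illustration (not part of the PostgreSQL sources): for
 *
 *		SELECT * FROM a JOIN b ON false
 *
 * eval_const_expressions() reduces the join condition to a single
 * constant-FALSE RestrictInfo, so this function returns true and the
 * caller (make_join_rel) can mark the joinrel dummy via mark_dummy_rel()
 * rather than cost a join that cannot return rows.  In the outer-join
 * case, e.g. a LEFT JOIN b ON false, the same test lets the planner
 * recognize that the nullable side can never produce a match.
 */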