X-Git-Url: http://git.osdn.net/view?a=blobdiff_plain;f=core.c;h=aa8d868b6ecf5df88cd146a40eccf1da6cbdbcdb;hb=2a9e14e50905decc0d37506284fbfb5bf1e094ac;hp=59b2a2a20507ea1caad9e84be04d2a41e3f92b12;hpb=9f444026f40b7561a311ca9ea881ebcf29e7852f;p=pghintplan%2Fpg_hint_plan.git

diff --git a/core.c b/core.c
index 59b2a2a..aa8d868 100644
--- a/core.c
+++ b/core.c
@@ -1,16 +1,582 @@
-/*
- * PostgreSQL 本体から流用した関数
+/*-------------------------------------------------------------------------
+ *
+ * core.c
+ *	  Routines copied from PostgreSQL core distribution.
+ *
+ *
+ * The main purpose of this file is to have access to static functions in core.
+ * Another purpose is to tweak the behavior of functions by replacing parts of
+ * them with macro definitions. See the end of pg_hint_plan.c for details. In
+ * any case, this file *must* contain the required functions unmodified.
+ *
+ * This file contains the following functions from the corresponding files.
 *
 * src/backend/optimizer/path/allpaths.c
- *	   standard_join_search()
- *	   set_plain_rel_pathlist()
 *
- * src/backend/optimizer/path/joinrels.c:
- *	   join_search_one_level()
+ *	static functions:
+ *	   set_plain_rel_pathlist()
+ *	   set_append_rel_pathlist()
+ *	   add_paths_to_append_rel()
+ *	   generate_mergeappend_paths()
+ *	   get_cheapest_parameterized_child_path()
+ *	   accumulate_append_subpath()
+ *
+ *	public functions:
+ *	   standard_join_search(): This function is not static. The reason for
+ *	    including it here is make_rels_by_clause_joins: in order to avoid
+ *	    generating apparently unwanted join combinations, we decided to
+ *	    change the behavior of make_join_rel, which is called under this
+ *	    function.
+ *
+ * src/backend/optimizer/path/joinrels.c
+ *
+ *	public functions:
+ *	   join_search_one_level(): We have to modify this to call our definition
+ *	    of make_rels_by_clause_joins.
+ *
+ *	static functions:
 *	   make_rels_by_clause_joins()
 *	   make_rels_by_clauseless_joins()
+ *	   join_is_legal()
 *	   has_join_restriction()
+ *	   is_dummy_rel()
+ *	   mark_dummy_rel()
+ *	   restriction_is_constant_false()
+ *
+ *
+ * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ *-------------------------------------------------------------------------
+ */
+
+
+/*
+ * set_plain_rel_pathlist
+ *	  Build access paths for a plain relation (no subquery, no inheritance)
+ */
+static void
+set_plain_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
+{
+	Relids		required_outer;
+
+	/*
+	 * We don't support pushing join clauses into the quals of a seqscan, but
+	 * it could still have required parameterization due to LATERAL refs in
+	 * its tlist.
+	 */
+	required_outer = rel->lateral_relids;
+
+	/* Consider sequential scan */
+	add_path(rel, create_seqscan_path(root, rel, required_outer, 0));
+
+	/* If appropriate, consider parallel sequential scan */
+	if (rel->consider_parallel && required_outer == NULL)
+		create_plain_partial_paths(root, rel);
+
+	/* Consider index scans */
+	create_index_paths(root, rel);
+
+	/* Consider TID scans */
+	create_tidscan_paths(root, rel);
+}
+
+
+/*
+ * set_append_rel_pathlist
+ *	  Build access paths for an "append relation"
+ */
+static void
+set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
+						Index rti, RangeTblEntry *rte)
+{
+	int			parentRTindex = rti;
+	List	   *live_childrels = NIL;
+	ListCell   *l;
+
+	/*
+	 * Generate access paths for each member relation, and remember the
+	 * non-dummy children.
+ */ + foreach(l, root->append_rel_list) + { + AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(l); + int childRTindex; + RangeTblEntry *childRTE; + RelOptInfo *childrel; + + /* append_rel_list contains all append rels; ignore others */ + if (appinfo->parent_relid != parentRTindex) + continue; + + /* Re-locate the child RTE and RelOptInfo */ + childRTindex = appinfo->child_relid; + childRTE = root->simple_rte_array[childRTindex]; + childrel = root->simple_rel_array[childRTindex]; + + /* + * If set_append_rel_size() decided the parent appendrel was + * parallel-unsafe at some point after visiting this child rel, we + * need to propagate the unsafety marking down to the child, so that + * we don't generate useless partial paths for it. + */ + if (!rel->consider_parallel) + childrel->consider_parallel = false; + + /* + * Compute the child's access paths. + */ + set_rel_pathlist(root, childrel, childRTindex, childRTE); + + /* + * If child is dummy, ignore it. + */ + if (IS_DUMMY_REL(childrel)) + continue; + + /* + * Child is live, so add it to the live_childrels list for use below. + */ + live_childrels = lappend(live_childrels, childrel); + } + + /* Add paths to the "append" relation. */ + add_paths_to_append_rel(root, rel, live_childrels); +} + +/* + * add_paths_to_append_rel + * Generate paths for given "append" relation given the set of non-dummy + * child rels. + * + * The function collects all parameterizations and orderings supported by the + * non-dummy children. For every such parameterization or ordering, it creates + * an append path collecting one path from each non-dummy child with given + * parameterization or ordering. Similarly it collects partial paths from + * non-dummy children to create partial append paths. + */ +static void +add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel, + List *live_childrels) +{ + List *subpaths = NIL; + bool subpaths_valid = true; + List *partial_subpaths = NIL; + bool partial_subpaths_valid = true; + List *all_child_pathkeys = NIL; + List *all_child_outers = NIL; + ListCell *l; + List *partitioned_rels = NIL; + RangeTblEntry *rte; + + rte = planner_rt_fetch(rel->relid, root); + if (rte->relkind == RELKIND_PARTITIONED_TABLE) + { + partitioned_rels = get_partitioned_child_rels(root, rel->relid); + /* The root partitioned table is included as a child rel */ + Assert(list_length(partitioned_rels) >= 1); + } + + /* + * For every non-dummy child, remember the cheapest path. Also, identify + * all pathkeys (orderings) and parameterizations (required_outer sets) + * available for the non-dummy member relations. + */ + foreach(l, live_childrels) + { + RelOptInfo *childrel = lfirst(l); + ListCell *lcp; + + /* + * If child has an unparameterized cheapest-total path, add that to + * the unparameterized Append path we are constructing for the parent. + * If not, there's no workable unparameterized path. + */ + if (childrel->cheapest_total_path->param_info == NULL) + subpaths = accumulate_append_subpath(subpaths, + childrel->cheapest_total_path); + else + subpaths_valid = false; + + /* Same idea, but for a partial plan. */ + if (childrel->partial_pathlist != NIL) + partial_subpaths = accumulate_append_subpath(partial_subpaths, + linitial(childrel->partial_pathlist)); + else + partial_subpaths_valid = false; + + /* + * Collect lists of all the available path orderings and + * parameterizations for all the children. 
We use these as a + * heuristic to indicate which sort orderings and parameterizations we + * should build Append and MergeAppend paths for. + */ + foreach(lcp, childrel->pathlist) + { + Path *childpath = (Path *) lfirst(lcp); + List *childkeys = childpath->pathkeys; + Relids childouter = PATH_REQ_OUTER(childpath); + + /* Unsorted paths don't contribute to pathkey list */ + if (childkeys != NIL) + { + ListCell *lpk; + bool found = false; + + /* Have we already seen this ordering? */ + foreach(lpk, all_child_pathkeys) + { + List *existing_pathkeys = (List *) lfirst(lpk); + + if (compare_pathkeys(existing_pathkeys, + childkeys) == PATHKEYS_EQUAL) + { + found = true; + break; + } + } + if (!found) + { + /* No, so add it to all_child_pathkeys */ + all_child_pathkeys = lappend(all_child_pathkeys, + childkeys); + } + } + + /* Unparameterized paths don't contribute to param-set list */ + if (childouter) + { + ListCell *lco; + bool found = false; + + /* Have we already seen this param set? */ + foreach(lco, all_child_outers) + { + Relids existing_outers = (Relids) lfirst(lco); + + if (bms_equal(existing_outers, childouter)) + { + found = true; + break; + } + } + if (!found) + { + /* No, so add it to all_child_outers */ + all_child_outers = lappend(all_child_outers, + childouter); + } + } + } + } + + /* + * If we found unparameterized paths for all children, build an unordered, + * unparameterized Append path for the rel. (Note: this is correct even + * if we have zero or one live subpath due to constraint exclusion.) + */ + if (subpaths_valid) + add_path(rel, (Path *) create_append_path(rel, subpaths, NULL, 0, + partitioned_rels)); + + /* + * Consider an append of partial unordered, unparameterized partial paths. + */ + if (partial_subpaths_valid) + { + AppendPath *appendpath; + ListCell *lc; + int parallel_workers = 0; + + /* + * Decide on the number of workers to request for this append path. + * For now, we just use the maximum value from among the members. It + * might be useful to use a higher number if the Append node were + * smart enough to spread out the workers, but it currently isn't. + */ + foreach(lc, partial_subpaths) + { + Path *path = lfirst(lc); + + parallel_workers = Max(parallel_workers, path->parallel_workers); + } + Assert(parallel_workers > 0); + + /* Generate a partial append path. */ + appendpath = create_append_path(rel, partial_subpaths, NULL, + parallel_workers, partitioned_rels); + add_partial_path(rel, (Path *) appendpath); + } + + /* + * Also build unparameterized MergeAppend paths based on the collected + * list of child pathkeys. + */ + if (subpaths_valid) + generate_mergeappend_paths(root, rel, live_childrels, + all_child_pathkeys, + partitioned_rels); + + /* + * Build Append paths for each parameterization seen among the child rels. + * (This may look pretty expensive, but in most cases of practical + * interest, the child rels will expose mostly the same parameterizations, + * so that not that many cases actually get considered here.) + * + * The Append node itself cannot enforce quals, so all qual checking must + * be done in the child paths. This means that to have a parameterized + * Append path, we must have the exact same parameterization for each + * child path; otherwise some children might be failing to check the + * moved-down quals. To make them match up, we can try to increase the + * parameterization of lesser-parameterized paths. 
+ */ + foreach(l, all_child_outers) + { + Relids required_outer = (Relids) lfirst(l); + ListCell *lcr; + + /* Select the child paths for an Append with this parameterization */ + subpaths = NIL; + subpaths_valid = true; + foreach(lcr, live_childrels) + { + RelOptInfo *childrel = (RelOptInfo *) lfirst(lcr); + Path *subpath; + + subpath = get_cheapest_parameterized_child_path(root, + childrel, + required_outer); + if (subpath == NULL) + { + /* failed to make a suitable path for this child */ + subpaths_valid = false; + break; + } + subpaths = accumulate_append_subpath(subpaths, subpath); + } + + if (subpaths_valid) + add_path(rel, (Path *) + create_append_path(rel, subpaths, required_outer, 0, + partitioned_rels)); + } +} + + +/* + * generate_mergeappend_paths + * Generate MergeAppend paths for an append relation + * + * Generate a path for each ordering (pathkey list) appearing in + * all_child_pathkeys. + * + * We consider both cheapest-startup and cheapest-total cases, ie, for each + * interesting ordering, collect all the cheapest startup subpaths and all the + * cheapest total paths, and build a MergeAppend path for each case. + * + * We don't currently generate any parameterized MergeAppend paths. While + * it would not take much more code here to do so, it's very unclear that it + * is worth the planning cycles to investigate such paths: there's little + * use for an ordered path on the inside of a nestloop. In fact, it's likely + * that the current coding of add_path would reject such paths out of hand, + * because add_path gives no credit for sort ordering of parameterized paths, + * and a parameterized MergeAppend is going to be more expensive than the + * corresponding parameterized Append path. If we ever try harder to support + * parameterized mergejoin plans, it might be worth adding support for + * parameterized MergeAppends to feed such joins. (See notes in + * optimizer/README for why that might not ever happen, though.) */ +static void +generate_mergeappend_paths(PlannerInfo *root, RelOptInfo *rel, + List *live_childrels, + List *all_child_pathkeys, + List *partitioned_rels) +{ + ListCell *lcp; + + foreach(lcp, all_child_pathkeys) + { + List *pathkeys = (List *) lfirst(lcp); + List *startup_subpaths = NIL; + List *total_subpaths = NIL; + bool startup_neq_total = false; + ListCell *lcr; + + /* Select the child paths for this ordering... */ + foreach(lcr, live_childrels) + { + RelOptInfo *childrel = (RelOptInfo *) lfirst(lcr); + Path *cheapest_startup, + *cheapest_total; + + /* Locate the right paths, if they are available. */ + cheapest_startup = + get_cheapest_path_for_pathkeys(childrel->pathlist, + pathkeys, + NULL, + STARTUP_COST, + false); + cheapest_total = + get_cheapest_path_for_pathkeys(childrel->pathlist, + pathkeys, + NULL, + TOTAL_COST, + false); + + /* + * If we can't find any paths with the right order just use the + * cheapest-total path; we'll have to sort it later. + */ + if (cheapest_startup == NULL || cheapest_total == NULL) + { + cheapest_startup = cheapest_total = + childrel->cheapest_total_path; + /* Assert we do have an unparameterized path for this child */ + Assert(cheapest_total->param_info == NULL); + } + + /* + * Notice whether we actually have different paths for the + * "cheapest" and "total" cases; frequently there will be no point + * in two create_merge_append_path() calls. 
+ */ + if (cheapest_startup != cheapest_total) + startup_neq_total = true; + + startup_subpaths = + accumulate_append_subpath(startup_subpaths, cheapest_startup); + total_subpaths = + accumulate_append_subpath(total_subpaths, cheapest_total); + } + + /* ... and build the MergeAppend paths */ + add_path(rel, (Path *) create_merge_append_path(root, + rel, + startup_subpaths, + pathkeys, + NULL, + partitioned_rels)); + if (startup_neq_total) + add_path(rel, (Path *) create_merge_append_path(root, + rel, + total_subpaths, + pathkeys, + NULL, + partitioned_rels)); + } +} + + +/* + * get_cheapest_parameterized_child_path + * Get cheapest path for this relation that has exactly the requested + * parameterization. + * + * Returns NULL if unable to create such a path. + */ +static Path * +get_cheapest_parameterized_child_path(PlannerInfo *root, RelOptInfo *rel, + Relids required_outer) +{ + Path *cheapest; + ListCell *lc; + + /* + * Look up the cheapest existing path with no more than the needed + * parameterization. If it has exactly the needed parameterization, we're + * done. + */ + cheapest = get_cheapest_path_for_pathkeys(rel->pathlist, + NIL, + required_outer, + TOTAL_COST, + false); + Assert(cheapest != NULL); + if (bms_equal(PATH_REQ_OUTER(cheapest), required_outer)) + return cheapest; + + /* + * Otherwise, we can "reparameterize" an existing path to match the given + * parameterization, which effectively means pushing down additional + * joinquals to be checked within the path's scan. However, some existing + * paths might check the available joinquals already while others don't; + * therefore, it's not clear which existing path will be cheapest after + * reparameterization. We have to go through them all and find out. + */ + cheapest = NULL; + foreach(lc, rel->pathlist) + { + Path *path = (Path *) lfirst(lc); + + /* Can't use it if it needs more than requested parameterization */ + if (!bms_is_subset(PATH_REQ_OUTER(path), required_outer)) + continue; + + /* + * Reparameterization can only increase the path's cost, so if it's + * already more expensive than the current cheapest, forget it. + */ + if (cheapest != NULL && + compare_path_costs(cheapest, path, TOTAL_COST) <= 0) + continue; + + /* Reparameterize if needed, then recheck cost */ + if (!bms_equal(PATH_REQ_OUTER(path), required_outer)) + { + path = reparameterize_path(root, path, required_outer, 1.0); + if (path == NULL) + continue; /* failed to reparameterize this one */ + Assert(bms_equal(PATH_REQ_OUTER(path), required_outer)); + + if (cheapest != NULL && + compare_path_costs(cheapest, path, TOTAL_COST) <= 0) + continue; + } + + /* We have a new best path */ + cheapest = path; + } + + /* Return the best path, or NULL if we found no suitable candidate */ + return cheapest; +} + + +/* + * accumulate_append_subpath + * Add a subpath to the list being built for an Append or MergeAppend + * + * It's possible that the child is itself an Append or MergeAppend path, in + * which case we can "cut out the middleman" and just add its child paths to + * our own list. (We don't try to do this earlier because we need to apply + * both levels of transformation to the quals.) + * + * Note that if we omit a child MergeAppend in this way, we are effectively + * omitting a sort step, which seems fine: if the parent is to be an Append, + * its result would be unsorted anyway, while if the parent is to be a + * MergeAppend, there's no point in a separate sort on a child. 
+ */ +static List * +accumulate_append_subpath(List *subpaths, Path *path) +{ + if (IsA(path, AppendPath)) + { + AppendPath *apath = (AppendPath *) path; + + /* list_copy is important here to avoid sharing list substructure */ + return list_concat(subpaths, list_copy(apath->subpaths)); + } + else if (IsA(path, MergeAppendPath)) + { + MergeAppendPath *mpath = (MergeAppendPath *) path; + + /* list_copy is important here to avoid sharing list substructure */ + return list_concat(subpaths, list_copy(mpath->subpaths)); + } + else + return lappend(subpaths, path); +} + /* * standard_join_search @@ -21,7 +587,7 @@ * independent jointree items in the query. This is > 1. * * 'initial_rels' is a list of RelOptInfo nodes for each independent - * jointree item. These are the components to be joined together. + * jointree item. These are the components to be joined together. * Note that levels_needed == list_length(initial_rels). * * Returns the final level of join relations, i.e., the relation that is @@ -37,7 +603,7 @@ * needed for these paths need have been instantiated. * * Note to plugin authors: the functions invoked during standard_join_search() - * modify root->join_rel_list and root->join_rel_hash. If you want to do more + * modify root->join_rel_list and root->join_rel_hash. If you want to do more * than one join-order search, you'll probably need to save and restore the * original states of those data structures. See geqo_eval() for an example. */ @@ -80,12 +646,19 @@ standard_join_search(PlannerInfo *root, int levels_needed, List *initial_rels) join_search_one_level(root, lev); /* - * Do cleanup work on each just-processed rel. + * Run generate_gather_paths() for each just-processed joinrel. We + * could not do this earlier because both regular and partial paths + * can get added to a particular joinrel at multiple times within + * join_search_one_level. After that, we're done creating paths for + * the joinrel, so run set_cheapest(). */ foreach(lc, root->join_rel_level[lev]) { rel = (RelOptInfo *) lfirst(lc); + /* Create GatherPaths for any useful partial paths for rel */ + generate_gather_paths(root, rel); + /* Find and save the cheapest paths for this rel */ set_cheapest(rel); @@ -110,25 +683,25 @@ standard_join_search(PlannerInfo *root, int levels_needed, List *initial_rels) } /* - * set_plain_rel_pathlist - * Build access paths for a plain relation (no subquery, no inheritance) + * create_plain_partial_paths + * Build partial access paths for parallel scan of a plain relation */ static void -set_plain_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte) +create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel) { - /* Consider sequential scan */ - add_path(rel, create_seqscan_path(root, rel)); + int parallel_workers; - /* Consider index scans */ - create_index_paths(root, rel); + parallel_workers = compute_parallel_worker(rel, rel->pages, -1); - /* Consider TID scans */ - create_tidscan_paths(root, rel); + /* If any limit was set to zero, the user doesn't want a parallel scan. */ + if (parallel_workers <= 0) + return; - /* Now find the cheapest of the paths for this rel */ - set_cheapest(rel); + /* Add an unordered partial path based on a parallel sequential scan. 
*/ + add_partial_path(rel, create_seqscan_path(root, rel, NULL, parallel_workers)); } + /* * join_search_one_level * Consider ways to produce join relations containing exactly 'level' @@ -160,37 +733,34 @@ join_search_one_level(PlannerInfo *root, int level) * We prefer to join using join clauses, but if we find a rel of level-1 * members that has no join clauses, we will generate Cartesian-product * joins against all initial rels not already contained in it. - * - * In the first pass (level == 2), we try to join each initial rel to each - * initial rel that appears later in joinrels[1]. (The mirror-image joins - * are handled automatically by make_join_rel.) In later passes, we try - * to join rels of size level-1 from joinrels[level-1] to each initial rel - * in joinrels[1]. */ foreach(r, joinrels[level - 1]) { RelOptInfo *old_rel = (RelOptInfo *) lfirst(r); - ListCell *other_rels; - - if (level == 2) - other_rels = lnext(r); /* only consider remaining initial - * rels */ - else - other_rels = list_head(joinrels[1]); /* consider all initial - * rels */ if (old_rel->joininfo != NIL || old_rel->has_eclass_joins || has_join_restriction(root, old_rel)) { /* - * Note that if all available join clauses for this rel require - * more than one other rel, we will fail to make any joins against - * it here. In most cases that's OK; it'll be considered by - * "bushy plan" join code in a higher-level pass where we have - * those other rels collected into a join rel. + * There are join clauses or join order restrictions relevant to + * this rel, so consider joins between this rel and (only) those + * initial rels it is linked to by a clause or restriction. * - * See also the last-ditch case below. + * At level 2 this condition is symmetric, so there is no need to + * look at initial rels before this one in the list; we already + * considered such joins when we were at the earlier rel. (The + * mirror-image joins are handled automatically by make_join_rel.) + * In later passes (level > 2), we join rels of the previous level + * to each initial rel they don't already include but have a join + * clause or restriction with. */ + ListCell *other_rels; + + if (level == 2) /* consider remaining initial rels */ + other_rels = lnext(r); + else /* consider all initial rels */ + other_rels = list_head(joinrels[1]); + make_rels_by_clause_joins(root, old_rel, other_rels); @@ -201,10 +771,17 @@ join_search_one_level(PlannerInfo *root, int level) * Oops, we have a relation that is not joined to any other * relation, either directly or by join-order restrictions. * Cartesian product time. + * + * We consider a cartesian product with each not-already-included + * initial rel, whether it has other join clauses or not. At + * level 2, if there are two or more clauseless initial rels, we + * will redundantly consider joining them in both directions; but + * such cases aren't common enough to justify adding complexity to + * avoid the duplicated effort. */ make_rels_by_clauseless_joins(root, old_rel, - other_rels); + list_head(joinrels[1])); } } @@ -234,7 +811,7 @@ join_search_one_level(PlannerInfo *root, int level) ListCell *r2; /* - * We can ignore clauseless joins here, *except* when they + * We can ignore relations without join clauses here, unless they * participate in join-order restrictions --- then we might have * to force a bushy join plan. */ @@ -255,8 +832,8 @@ join_search_one_level(PlannerInfo *root, int level) { /* * OK, we can build a rel of the right level from this - * pair of rels. 
Do so if there is at least one usable - * join clause or a relevant join restriction. + * pair of rels. Do so if there is at least one relevant + * join clause or join order restriction. */ if (have_relevant_joinclause(root, old_rel, new_rel) || have_join_order_restriction(root, old_rel, new_rel)) @@ -268,17 +845,24 @@ join_search_one_level(PlannerInfo *root, int level) } } - /* + /*---------- * Last-ditch effort: if we failed to find any usable joins so far, force * a set of cartesian-product joins to be generated. This handles the * special case where all the available rels have join clauses but we - * cannot use any of those clauses yet. An example is + * cannot use any of those clauses yet. This can only happen when we are + * considering a join sub-problem (a sub-joinlist) and all the rels in the + * sub-problem have only join clauses with rels outside the sub-problem. + * An example is * - * SELECT * FROM a,b,c WHERE (a.f1 + b.f2 + c.f3) = 0; + * SELECT ... FROM a INNER JOIN b ON TRUE, c, d, ... + * WHERE a.w = c.x and b.y = d.z; * - * The join clause will be usable at level 3, but at level 2 we have no - * choice but to make cartesian joins. We consider only left-sided and - * right-sided cartesian joins in this case (no bushy). + * If the "a INNER JOIN b" sub-problem does not get flattened into the + * upper level, we must be willing to make a cartesian join of a and b; + * but the code above will not have done so, because it thought that both + * a and b have joinclauses. We consider only left-sided and right-sided + * cartesian joins in this case (no bushy). + *---------- */ if (joinrels[level] == NIL) { @@ -289,23 +873,15 @@ join_search_one_level(PlannerInfo *root, int level) foreach(r, joinrels[level - 1]) { RelOptInfo *old_rel = (RelOptInfo *) lfirst(r); - ListCell *other_rels; - - if (level == 2) - other_rels = lnext(r); /* only consider remaining initial - * rels */ - else - other_rels = list_head(joinrels[1]); /* consider all initial - * rels */ make_rels_by_clauseless_joins(root, old_rel, - other_rels); + list_head(joinrels[1])); } /*---------- * When special joins are involved, there may be no legal way - * to make an N-way join for some values of N. For example consider + * to make an N-way join for some values of N. For example consider * * SELECT ... FROM t1 WHERE * x IN (SELECT ... FROM t2,t3 WHERE ...) AND @@ -316,15 +892,19 @@ join_search_one_level(PlannerInfo *root, int level) * to accept failure at level 4 and go on to discover a workable * bushy plan at level 5. * - * However, if there are no special joins then join_is_legal() should - * never fail, and so the following sanity check is useful. + * However, if there are no special joins and no lateral references + * then join_is_legal() should never fail, and so the following sanity + * check is useful. 
*---------- */ - if (joinrels[level] == NIL && root->join_info_list == NIL) + if (joinrels[level] == NIL && + root->join_info_list == NIL && + !root->hasLateralRTEs) elog(ERROR, "failed to build any %d-way joins", level); } } + /* * make_rels_by_clause_joins * Build joins between the given relation 'old_rel' and other relations @@ -365,6 +945,7 @@ make_rels_by_clause_joins(PlannerInfo *root, } } + /* * make_rels_by_clauseless_joins * Given a relation 'old_rel' and a list of other relations @@ -397,14 +978,345 @@ make_rels_by_clauseless_joins(PlannerInfo *root, } } + +/* + * join_is_legal + * Determine whether a proposed join is legal given the query's + * join order constraints; and if it is, determine the join type. + * + * Caller must supply not only the two rels, but the union of their relids. + * (We could simplify the API by computing joinrelids locally, but this + * would be redundant work in the normal path through make_join_rel.) + * + * On success, *sjinfo_p is set to NULL if this is to be a plain inner join, + * else it's set to point to the associated SpecialJoinInfo node. Also, + * *reversed_p is set TRUE if the given relations need to be swapped to + * match the SpecialJoinInfo node. + */ +static bool +join_is_legal(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2, + Relids joinrelids, + SpecialJoinInfo **sjinfo_p, bool *reversed_p) +{ + SpecialJoinInfo *match_sjinfo; + bool reversed; + bool unique_ified; + bool must_be_leftjoin; + ListCell *l; + + /* + * Ensure output params are set on failure return. This is just to + * suppress uninitialized-variable warnings from overly anal compilers. + */ + *sjinfo_p = NULL; + *reversed_p = false; + + /* + * If we have any special joins, the proposed join might be illegal; and + * in any case we have to determine its join type. Scan the join info + * list for matches and conflicts. + */ + match_sjinfo = NULL; + reversed = false; + unique_ified = false; + must_be_leftjoin = false; + + foreach(l, root->join_info_list) + { + SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) lfirst(l); + + /* + * This special join is not relevant unless its RHS overlaps the + * proposed join. (Check this first as a fast path for dismissing + * most irrelevant SJs quickly.) + */ + if (!bms_overlap(sjinfo->min_righthand, joinrelids)) + continue; + + /* + * Also, not relevant if proposed join is fully contained within RHS + * (ie, we're still building up the RHS). + */ + if (bms_is_subset(joinrelids, sjinfo->min_righthand)) + continue; + + /* + * Also, not relevant if SJ is already done within either input. + */ + if (bms_is_subset(sjinfo->min_lefthand, rel1->relids) && + bms_is_subset(sjinfo->min_righthand, rel1->relids)) + continue; + if (bms_is_subset(sjinfo->min_lefthand, rel2->relids) && + bms_is_subset(sjinfo->min_righthand, rel2->relids)) + continue; + + /* + * If it's a semijoin and we already joined the RHS to any other rels + * within either input, then we must have unique-ified the RHS at that + * point (see below). Therefore the semijoin is no longer relevant in + * this join path. + */ + if (sjinfo->jointype == JOIN_SEMI) + { + if (bms_is_subset(sjinfo->syn_righthand, rel1->relids) && + !bms_equal(sjinfo->syn_righthand, rel1->relids)) + continue; + if (bms_is_subset(sjinfo->syn_righthand, rel2->relids) && + !bms_equal(sjinfo->syn_righthand, rel2->relids)) + continue; + } + + /* + * If one input contains min_lefthand and the other contains + * min_righthand, then we can perform the SJ at this join. 
+ * + * Reject if we get matches to more than one SJ; that implies we're + * considering something that's not really valid. + */ + if (bms_is_subset(sjinfo->min_lefthand, rel1->relids) && + bms_is_subset(sjinfo->min_righthand, rel2->relids)) + { + if (match_sjinfo) + return false; /* invalid join path */ + match_sjinfo = sjinfo; + reversed = false; + } + else if (bms_is_subset(sjinfo->min_lefthand, rel2->relids) && + bms_is_subset(sjinfo->min_righthand, rel1->relids)) + { + if (match_sjinfo) + return false; /* invalid join path */ + match_sjinfo = sjinfo; + reversed = true; + } + else if (sjinfo->jointype == JOIN_SEMI && + bms_equal(sjinfo->syn_righthand, rel2->relids) && + create_unique_path(root, rel2, rel2->cheapest_total_path, + sjinfo) != NULL) + { + /*---------- + * For a semijoin, we can join the RHS to anything else by + * unique-ifying the RHS (if the RHS can be unique-ified). + * We will only get here if we have the full RHS but less + * than min_lefthand on the LHS. + * + * The reason to consider such a join path is exemplified by + * SELECT ... FROM a,b WHERE (a.x,b.y) IN (SELECT c1,c2 FROM c) + * If we insist on doing this as a semijoin we will first have + * to form the cartesian product of A*B. But if we unique-ify + * C then the semijoin becomes a plain innerjoin and we can join + * in any order, eg C to A and then to B. When C is much smaller + * than A and B this can be a huge win. So we allow C to be + * joined to just A or just B here, and then make_join_rel has + * to handle the case properly. + * + * Note that actually we'll allow unique-ified C to be joined to + * some other relation D here, too. That is legal, if usually not + * very sane, and this routine is only concerned with legality not + * with whether the join is good strategy. + *---------- + */ + if (match_sjinfo) + return false; /* invalid join path */ + match_sjinfo = sjinfo; + reversed = false; + unique_ified = true; + } + else if (sjinfo->jointype == JOIN_SEMI && + bms_equal(sjinfo->syn_righthand, rel1->relids) && + create_unique_path(root, rel1, rel1->cheapest_total_path, + sjinfo) != NULL) + { + /* Reversed semijoin case */ + if (match_sjinfo) + return false; /* invalid join path */ + match_sjinfo = sjinfo; + reversed = true; + unique_ified = true; + } + else + { + /* + * Otherwise, the proposed join overlaps the RHS but isn't a valid + * implementation of this SJ. But don't panic quite yet: the RHS + * violation might have occurred previously, in one or both input + * relations, in which case we must have previously decided that + * it was OK to commute some other SJ with this one. If we need + * to perform this join to finish building up the RHS, rejecting + * it could lead to not finding any plan at all. (This can occur + * because of the heuristics elsewhere in this file that postpone + * clauseless joins: we might not consider doing a clauseless join + * within the RHS until after we've performed other, validly + * commutable SJs with one or both sides of the clauseless join.) + * This consideration boils down to the rule that if both inputs + * overlap the RHS, we can allow the join --- they are either + * fully within the RHS, or represent previously-allowed joins to + * rels outside it. + */ + if (bms_overlap(rel1->relids, sjinfo->min_righthand) && + bms_overlap(rel2->relids, sjinfo->min_righthand)) + continue; /* assume valid previous violation of RHS */ + + /* + * The proposed join could still be legal, but only if we're + * allowed to associate it into the RHS of this SJ. 
That means + * this SJ must be a LEFT join (not SEMI or ANTI, and certainly + * not FULL) and the proposed join must not overlap the LHS. + */ + if (sjinfo->jointype != JOIN_LEFT || + bms_overlap(joinrelids, sjinfo->min_lefthand)) + return false; /* invalid join path */ + + /* + * To be valid, the proposed join must be a LEFT join; otherwise + * it can't associate into this SJ's RHS. But we may not yet have + * found the SpecialJoinInfo matching the proposed join, so we + * can't test that yet. Remember the requirement for later. + */ + must_be_leftjoin = true; + } + } + + /* + * Fail if violated any SJ's RHS and didn't match to a LEFT SJ: the + * proposed join can't associate into an SJ's RHS. + * + * Also, fail if the proposed join's predicate isn't strict; we're + * essentially checking to see if we can apply outer-join identity 3, and + * that's a requirement. (This check may be redundant with checks in + * make_outerjoininfo, but I'm not quite sure, and it's cheap to test.) + */ + if (must_be_leftjoin && + (match_sjinfo == NULL || + match_sjinfo->jointype != JOIN_LEFT || + !match_sjinfo->lhs_strict)) + return false; /* invalid join path */ + + /* + * We also have to check for constraints imposed by LATERAL references. + */ + if (root->hasLateralRTEs) + { + bool lateral_fwd; + bool lateral_rev; + Relids join_lateral_rels; + + /* + * The proposed rels could each contain lateral references to the + * other, in which case the join is impossible. If there are lateral + * references in just one direction, then the join has to be done with + * a nestloop with the lateral referencer on the inside. If the join + * matches an SJ that cannot be implemented by such a nestloop, the + * join is impossible. + * + * Also, if the lateral reference is only indirect, we should reject + * the join; whatever rel(s) the reference chain goes through must be + * joined to first. + * + * Another case that might keep us from building a valid plan is the + * implementation restriction described by have_dangerous_phv(). 
+ */ + lateral_fwd = bms_overlap(rel1->relids, rel2->lateral_relids); + lateral_rev = bms_overlap(rel2->relids, rel1->lateral_relids); + if (lateral_fwd && lateral_rev) + return false; /* have lateral refs in both directions */ + if (lateral_fwd) + { + /* has to be implemented as nestloop with rel1 on left */ + if (match_sjinfo && + (reversed || + unique_ified || + match_sjinfo->jointype == JOIN_FULL)) + return false; /* not implementable as nestloop */ + /* check there is a direct reference from rel2 to rel1 */ + if (!bms_overlap(rel1->relids, rel2->direct_lateral_relids)) + return false; /* only indirect refs, so reject */ + /* check we won't have a dangerous PHV */ + if (have_dangerous_phv(root, rel1->relids, rel2->lateral_relids)) + return false; /* might be unable to handle required PHV */ + } + else if (lateral_rev) + { + /* has to be implemented as nestloop with rel2 on left */ + if (match_sjinfo && + (!reversed || + unique_ified || + match_sjinfo->jointype == JOIN_FULL)) + return false; /* not implementable as nestloop */ + /* check there is a direct reference from rel1 to rel2 */ + if (!bms_overlap(rel2->relids, rel1->direct_lateral_relids)) + return false; /* only indirect refs, so reject */ + /* check we won't have a dangerous PHV */ + if (have_dangerous_phv(root, rel2->relids, rel1->lateral_relids)) + return false; /* might be unable to handle required PHV */ + } + + /* + * LATERAL references could also cause problems later on if we accept + * this join: if the join's minimum parameterization includes any rels + * that would have to be on the inside of an outer join with this join + * rel, then it's never going to be possible to build the complete + * query using this join. We should reject this join not only because + * it'll save work, but because if we don't, the clauseless-join + * heuristics might think that legality of this join means that some + * other join rel need not be formed, and that could lead to failure + * to find any plan at all. We have to consider not only rels that + * are directly on the inner side of an OJ with the joinrel, but also + * ones that are indirectly so, so search to find all such rels. + */ + join_lateral_rels = min_join_parameterization(root, joinrelids, + rel1, rel2); + if (join_lateral_rels) + { + Relids join_plus_rhs = bms_copy(joinrelids); + bool more; + + do + { + more = false; + foreach(l, root->join_info_list) + { + SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) lfirst(l); + + if (bms_overlap(sjinfo->min_lefthand, join_plus_rhs) && + !bms_is_subset(sjinfo->min_righthand, join_plus_rhs)) + { + join_plus_rhs = bms_add_members(join_plus_rhs, + sjinfo->min_righthand); + more = true; + } + /* full joins constrain both sides symmetrically */ + if (sjinfo->jointype == JOIN_FULL && + bms_overlap(sjinfo->min_righthand, join_plus_rhs) && + !bms_is_subset(sjinfo->min_lefthand, join_plus_rhs)) + { + join_plus_rhs = bms_add_members(join_plus_rhs, + sjinfo->min_lefthand); + more = true; + } + } + } while (more); + if (bms_overlap(join_plus_rhs, join_lateral_rels)) + return false; /* will not be able to join to some RHS rel */ + } + } + + /* Otherwise, it's a valid join */ + *sjinfo_p = match_sjinfo; + *reversed_p = reversed; + return true; +} + + /* * has_join_restriction - * Detect whether the specified relation has join-order restrictions - * due to being inside an outer join or an IN (sub-SELECT). 
+ * Detect whether the specified relation has join-order restrictions, + * due to being inside an outer join or an IN (sub-SELECT), + * or participating in any LATERAL references or multi-rel PHVs. * * Essentially, this tests whether have_join_order_restriction() could * succeed with this rel and some other one. It's OK if we sometimes - * say "true" incorrectly. (Therefore, we don't bother with the relatively + * say "true" incorrectly. (Therefore, we don't bother with the relatively * expensive has_legal_joinclause test.) */ static bool @@ -412,6 +1324,18 @@ has_join_restriction(PlannerInfo *root, RelOptInfo *rel) { ListCell *l; + if (rel->lateral_relids != NULL || rel->lateral_referencers != NULL) + return true; + + foreach(l, root->placeholder_list) + { + PlaceHolderInfo *phinfo = (PlaceHolderInfo *) lfirst(l); + + if (bms_is_subset(rel->relids, phinfo->ph_eval_at) && + !bms_equal(rel->relids, phinfo->ph_eval_at)) + return true; + } + foreach(l, root->join_info_list) { SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) lfirst(l); @@ -433,3 +1357,101 @@ has_join_restriction(PlannerInfo *root, RelOptInfo *rel) return false; } + + +/* + * is_dummy_rel --- has relation been proven empty? + */ +static bool +is_dummy_rel(RelOptInfo *rel) +{ + return IS_DUMMY_REL(rel); +} + + +/* + * Mark a relation as proven empty. + * + * During GEQO planning, this can get invoked more than once on the same + * baserel struct, so it's worth checking to see if the rel is already marked + * dummy. + * + * Also, when called during GEQO join planning, we are in a short-lived + * memory context. We must make sure that the dummy path attached to a + * baserel survives the GEQO cycle, else the baserel is trashed for future + * GEQO cycles. On the other hand, when we are marking a joinrel during GEQO, + * we don't want the dummy path to clutter the main planning context. Upshot + * is that the best solution is to explicitly make the dummy path in the same + * context the given RelOptInfo is in. + */ +static void +mark_dummy_rel(RelOptInfo *rel) +{ + MemoryContext oldcontext; + + /* Already marked? */ + if (is_dummy_rel(rel)) + return; + + /* No, so choose correct context to make the dummy path in */ + oldcontext = MemoryContextSwitchTo(GetMemoryChunkContext(rel)); + + /* Set dummy size estimate */ + rel->rows = 0; + + /* Evict any previously chosen paths */ + rel->pathlist = NIL; + rel->partial_pathlist = NIL; + + /* Set up the dummy path */ + add_path(rel, (Path *) create_append_path(rel, NIL, NULL, 0, NIL)); + + /* Set or update cheapest_total_path and related fields */ + set_cheapest(rel); + + MemoryContextSwitchTo(oldcontext); +} + + +/* + * restriction_is_constant_false --- is a restrictlist just FALSE? + * + * In cases where a qual is provably constant FALSE, eval_const_expressions + * will generally have thrown away anything that's ANDed with it. In outer + * join situations this will leave us computing cartesian products only to + * decide there's no match for an outer row, which is pretty stupid. So, + * we need to detect the case. + * + * If only_pushed_down is TRUE, then consider only pushed-down quals. + */ +static bool +restriction_is_constant_false(List *restrictlist, bool only_pushed_down) +{ + ListCell *lc; + + /* + * Despite the above comment, the restriction list we see here might + * possibly have other members besides the FALSE constant, since other + * quals could get "pushed down" to the outer join level. So we check + * each member of the list. 
+ */ + foreach(lc, restrictlist) + { + RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc); + + if (only_pushed_down && !rinfo->is_pushed_down) + continue; + + if (rinfo->clause && IsA(rinfo->clause, Const)) + { + Const *con = (Const *) rinfo->clause; + + /* constant NULL is as good as constant FALSE for our purposes */ + if (con->constisnull) + return true; + if (!DatumGetBool(con->constvalue)) + return true; + } + } + return false; +}
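
For reference, the loop in add_paths_to_append_rel above that builds all_child_outers boils down to collecting the distinct required-outer relid sets seen among the child paths, so that one parameterized Append can later be tried per distinct set. Here is a minimal standalone sketch of just that collection step, with relid sets reduced to plain unsigned bitmasks and the child data invented for illustration; it is not PostgreSQL code.

#include <stdio.h>

int
main(void)
{
	/* invented required-outer sets of six child paths (some repeat) */
	unsigned	child_outers[] = {0x2, 0x0, 0x6, 0x2, 0x6, 0x4};
	int			nchildren = (int) (sizeof(child_outers) / sizeof(child_outers[0]));

	unsigned	all_child_outers[8];
	int			nouters = 0;

	for (int i = 0; i < nchildren; i++)
	{
		unsigned	outer = child_outers[i];
		int			found = 0;

		/* unparameterized paths don't contribute to the list */
		if (outer == 0)
			continue;

		/* have we already seen this parameterization? */
		for (int j = 0; j < nouters; j++)
		{
			if (all_child_outers[j] == outer)	/* stands in for bms_equal */
			{
				found = 1;
				break;
			}
		}
		if (!found)
			all_child_outers[nouters++] = outer;
	}

	/* one parameterized Append would later be considered per entry */
	for (int j = 0; j < nouters; j++)
		printf("distinct parameterization: 0x%x\n", all_child_outers[j]);
	return 0;
}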
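
generate_mergeappend_paths picks, for each interesting ordering and each child, both a cheapest-startup and a cheapest-total path among the child paths that already deliver that ordering, and falls back to the child's overall cheapest-total path (which would then need an explicit sort) when none does. The standalone sketch below mimics only that per-child selection; ToyPath, ToyChild and every number in it are invented.

#include <stdio.h>

typedef struct
{
	int			sorted;			/* does this path deliver the ordering? */
	double		startup_cost;
	double		total_cost;
} ToyPath;

typedef struct
{
	ToyPath		paths[3];
	int			npaths;
	int			cheapest_total;	/* index of the overall cheapest-total path */
} ToyChild;

int
main(void)
{
	ToyChild	children[2] = {
		{{{1, 5.0, 80.0}, {1, 20.0, 60.0}, {0, 1.0, 40.0}}, 3, 2},
		{{{0, 2.0, 30.0}, {0, 0.5, 90.0}}, 2, 0},
	};

	for (int c = 0; c < 2; c++)
	{
		ToyChild   *child = &children[c];
		ToyPath    *cheapest_startup = NULL;
		ToyPath    *cheapest_total = NULL;

		for (int i = 0; i < child->npaths; i++)
		{
			ToyPath    *p = &child->paths[i];

			if (!p->sorted)
				continue;		/* wrong ordering for this MergeAppend */
			if (!cheapest_startup ||
				p->startup_cost < cheapest_startup->startup_cost)
				cheapest_startup = p;
			if (!cheapest_total ||
				p->total_cost < cheapest_total->total_cost)
				cheapest_total = p;
		}

		/* no suitably ordered path: take cheapest-total and sort it later */
		if (cheapest_startup == NULL || cheapest_total == NULL)
			cheapest_startup = cheapest_total =
				&child->paths[child->cheapest_total];

		printf("child %d: startup pick %.1f/%.1f, total pick %.1f/%.1f\n",
			   c,
			   cheapest_startup->startup_cost, cheapest_startup->total_cost,
			   cheapest_total->startup_cost, cheapest_total->total_cost);
	}
	return 0;
}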
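
get_cheapest_parameterized_child_path above keeps only paths whose required parameterization is a subset of the requested one, treats reparameterizing a strictly smaller parameterization as something that can only add cost, and returns the cheapest survivor. The following standalone sketch imitates that selection loop; the flat 10% reparameterization penalty and the ToyPath data are assumptions made purely for illustration and say nothing about how reparameterize_path actually costs things.

#include <stdio.h>

typedef struct
{
	const char *name;
	unsigned	req_outer;		/* bitmask of required outer rels */
	double		total_cost;
} ToyPath;

/* true if every bit of a is also set in b, like bms_is_subset(a, b) */
static int
is_subset(unsigned a, unsigned b)
{
	return (a & ~b) == 0;
}

int
main(void)
{
	ToyPath		paths[] = {
		{"seqscan", 0x0, 120.0},
		{"index scan using o1", 0x1, 15.0},
		{"index scan using o2", 0x2, 18.0},
		{"index scan using o3", 0x4, 5.0},	/* needs more than requested */
	};
	unsigned	required_outer = 0x3;	/* {o1, o2} requested */
	ToyPath    *cheapest = NULL;
	double		cheapest_cost = 0.0;

	for (int i = 0; i < 4; i++)
	{
		ToyPath    *path = &paths[i];
		double		cost = path->total_cost;

		/* can't use it if it needs rels outside the requested set */
		if (!is_subset(path->req_outer, required_outer))
			continue;

		/*
		 * Not exactly the requested set: pretend that reparameterizing it
		 * (pushing extra join quals into the scan) adds some cost.
		 */
		if (path->req_outer != required_outer)
			cost *= 1.10;

		if (cheapest == NULL || cost < cheapest_cost)
		{
			cheapest = path;
			cheapest_cost = cost;
		}
	}

	if (cheapest != NULL)
		printf("chose \"%s\" at cost %.1f\n", cheapest->name, cheapest_cost);
	return 0;
}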
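
accumulate_append_subpath "cuts out the middleman": when a child path is itself an Append or MergeAppend, its subpaths are added to the parent's list instead of nesting plan nodes. Below is a minimal standalone illustration of the same flattening over an invented ToyPath/ToyList pair; note that the real function concatenates copied Lists one level deep rather than recursing, so treat this only as a sketch of the idea.

#include <stdio.h>
#include <stdlib.h>

typedef enum
{
	TOY_SCAN,
	TOY_APPEND
} ToyPathKind;

typedef struct ToyPath
{
	ToyPathKind kind;
	const char *name;			/* used by TOY_SCAN */
	struct ToyList *subpaths;	/* used by TOY_APPEND */
} ToyPath;

typedef struct ToyList
{
	ToyPath    *path;
	struct ToyList *next;
} ToyList;

/* append one cell to a singly linked list, returning the new head */
static ToyList *
toy_lappend(ToyList *list, ToyPath *path)
{
	ToyList    *cell = malloc(sizeof(ToyList));
	ToyList    *tail = list;

	cell->path = path;
	cell->next = NULL;
	if (list == NULL)
		return cell;
	while (tail->next != NULL)
		tail = tail->next;
	tail->next = cell;
	return list;
}

/* the interesting part: splice a nested append's children into the list */
static ToyList *
accumulate_subpath(ToyList *subpaths, ToyPath *path)
{
	if (path->kind == TOY_APPEND)
	{
		for (ToyList *lc = path->subpaths; lc != NULL; lc = lc->next)
			subpaths = accumulate_subpath(subpaths, lc->path);
		return subpaths;
	}
	return toy_lappend(subpaths, path);
}

static ToyPath *
toy_scan(const char *name)
{
	ToyPath    *p = malloc(sizeof(ToyPath));

	p->kind = TOY_SCAN;
	p->name = name;
	p->subpaths = NULL;
	return p;
}

int
main(void)
{
	/* an inner append over b and c, accumulated after a plain scan of a */
	ToyPath    *inner = malloc(sizeof(ToyPath));
	ToyList    *flat = NULL;

	inner->kind = TOY_APPEND;
	inner->name = "inner append";
	inner->subpaths = toy_lappend(NULL, toy_scan("b"));
	inner->subpaths = toy_lappend(inner->subpaths, toy_scan("c"));

	flat = accumulate_subpath(flat, toy_scan("a"));
	flat = accumulate_subpath(flat, inner);

	for (ToyList *lc = flat; lc != NULL; lc = lc->next)
		printf("%s ", lc->path->name);	/* prints: a b c */
	printf("\n");
	return 0;
}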
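
standard_join_search and join_search_one_level form a dynamic program: level 1 holds the base relations, each higher level is built by joining relations from the levels below, and for a given set of relids only the cheapest alternatives survive (the job add_path and set_cheapest do in the real code). The standalone sketch below reproduces only that shape, using bitmasks for relid sets and a made-up cost function; it ignores join clauses, legality checks, and bushy joins, and none of its names come from PostgreSQL.

#include <stdio.h>
#include <stdlib.h>

#define NBASE	4				/* number of base relations */
#define MAXRELS (1 << NBASE)

typedef struct
{
	unsigned	relids;			/* bitmask of member base relations */
	double		cost;			/* cheapest cost found so far */
} ToyJoinRel;

static ToyJoinRel *joinrel_level[NBASE + 1][MAXRELS];
static int	nlevel[NBASE + 1];

static double base_rows[NBASE] = {100.0, 2000.0, 50.0, 10.0};

/* toy cost: product of the member base-relation sizes */
static double
toy_cost(unsigned relids)
{
	double		cost = 1.0;

	for (int i = 0; i < NBASE; i++)
		if (relids & (1u << i))
			cost *= base_rows[i];
	return cost;
}

/* keep one entry per relid set, remembering only the cheapest cost */
static void
remember_joinrel(int lev, unsigned relids, double cost)
{
	ToyJoinRel *rel;

	for (int i = 0; i < nlevel[lev]; i++)
	{
		rel = joinrel_level[lev][i];
		if (rel->relids == relids)
		{
			if (cost < rel->cost)
				rel->cost = cost;
			return;
		}
	}
	rel = malloc(sizeof(ToyJoinRel));
	rel->relids = relids;
	rel->cost = cost;
	joinrel_level[lev][nlevel[lev]++] = rel;
}

int
main(void)
{
	/* level 1: the base relations themselves */
	for (int i = 0; i < NBASE; i++)
		remember_joinrel(1, 1u << i, base_rows[i]);

	/* level lev: join each (lev-1)-way relation to each missing base rel */
	for (int lev = 2; lev <= NBASE; lev++)
	{
		for (int i = 0; i < nlevel[lev - 1]; i++)
		{
			ToyJoinRel *old_rel = joinrel_level[lev - 1][i];

			for (int b = 0; b < NBASE; b++)
			{
				unsigned	with = old_rel->relids | (1u << b);

				if (old_rel->relids & (1u << b))
					continue;	/* base rel already included */
				remember_joinrel(lev, with, old_rel->cost + toy_cost(with));
			}
		}
	}

	/* the single relation at the top level covers every base relation */
	printf("final joinrel 0x%x, cost %.0f\n",
		   joinrel_level[NBASE][0]->relids, joinrel_level[NBASE][0]->cost);
	return 0;
}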
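
At its core, the SpecialJoinInfo matching in join_is_legal above is a handful of bitmapset tests: a special join recorded as (min_lefthand, min_righthand) is only relevant if its RHS overlaps the proposed join, and it can be performed here if one input covers the LHS and the other covers the RHS (possibly reversed). A standalone sketch of just those tests follows, with relid sets as unsigned bitmasks and an invented LEFT JOIN whose LHS is {A} and RHS is {B,C}; everything else join_is_legal checks is left out.

#include <stdio.h>

/* true if every bit of a is also set in b, like bms_is_subset(a, b) */
static int
subset(unsigned a, unsigned b)
{
	return (a & ~b) == 0;
}

/* true if a and b share any bit, like bms_overlap(a, b) */
static int
overlap(unsigned a, unsigned b)
{
	return (a & b) != 0;
}

int
main(void)
{
	/* invented LEFT JOIN: LHS {A} = bit 0, RHS {B,C} = bits 1 and 2 */
	unsigned	min_lefthand = 0x1;
	unsigned	min_righthand = 0x6;

	/* proposed join inputs and their union */
	unsigned	rel1 = 0x1;		/* {A}   */
	unsigned	rel2 = 0x6;		/* {B,C} */
	unsigned	joinrelids = rel1 | rel2;

	if (!overlap(min_righthand, joinrelids))
		printf("special join not relevant to this join level\n");
	else if (subset(min_lefthand, rel1) && subset(min_righthand, rel2))
		printf("legal: perform the special join here (not reversed)\n");
	else if (subset(min_lefthand, rel2) && subset(min_righthand, rel1))
		printf("legal: perform the special join here (reversed)\n");
	else
		printf("not a valid way to implement this special join\n");
	return 0;
}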