Fix bugs in relpersistence handling during table creation.
[pg-rex/syncrep.git] / src/backend/executor/execMain.c
index 3c9f079..eacd863 100644
@@ -6,92 +6,93 @@
  * INTERFACE ROUTINES
  *     ExecutorStart()
  *     ExecutorRun()
+ *     ExecutorFinish()
  *     ExecutorEnd()
  *
- *     The old ExecutorMain() has been replaced by ExecutorStart(),
- *     ExecutorRun() and ExecutorEnd()
- *
- *     These three procedures are the external interfaces to the executor.
+ *     These four procedures are the external interface to the executor.
  *     In each case, the query descriptor is required as an argument.
  *
- *     ExecutorStart() must be called at the beginning of execution of any
- *     query plan and ExecutorEnd() should always be called at the end of
- *     execution of a plan.
+ *     ExecutorStart must be called at the beginning of execution of any
+ *     query plan and ExecutorEnd must always be called at the end of
+ *     execution of a plan (unless it is aborted due to error).
  *
  *     ExecutorRun accepts direction and count arguments that specify whether
  *     the plan is to be executed forwards, backwards, and for how many tuples.
+ *     In some cases ExecutorRun may be called multiple times to process all
+ *     the tuples for a plan.  It is also acceptable to stop short of executing
+ *     the whole plan (but only if it is a SELECT).
+ *
+ *     ExecutorFinish must be called after the final ExecutorRun call and
+ *     before ExecutorEnd.  This can be omitted only in case of EXPLAIN,
+ *     which should also omit ExecutorRun.
  *
- * Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  *
  * IDENTIFICATION
- *       $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.273 2006/07/03 22:45:38 tgl Exp $
+ *       src/backend/executor/execMain.c
  *
  *-------------------------------------------------------------------------
  */
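As a reading aid, here is a minimal sketch (not part of the patch) of the run-to-completion calling sequence the header comment above describes. It assumes a QueryDesc already built with CreateQueryDesc(), and omits portal handling and error recovery.

#include "postgres.h"
#include "executor/executor.h"

/* Drive one query plan from start to finish (simplified sketch). */
static void
run_to_completion_sketch(QueryDesc *queryDesc)
{
    /* build the EState and PlanState trees */
    ExecutorStart(queryDesc, 0);

    /* count = 0 means "run the plan to completion" */
    ExecutorRun(queryDesc, ForwardScanDirection, 0L);

    /* fire queued AFTER triggers, run ModifyTable nodes to completion */
    ExecutorFinish(queryDesc);

    /* release resources; must always be called unless the query aborted */
    ExecutorEnd(queryDesc);
}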
 #include "postgres.h"
 
-#include "access/heapam.h"
 #include "access/reloptions.h"
-#include "access/xlog.h"
+#include "access/sysattr.h"
+#include "access/transam.h"
+#include "access/xact.h"
 #include "catalog/heap.h"
 #include "catalog/namespace.h"
-#include "commands/tablecmds.h"
+#include "catalog/toasting.h"
 #include "commands/tablespace.h"
 #include "commands/trigger.h"
 #include "executor/execdebug.h"
-#include "executor/execdefs.h"
 #include "executor/instrument.h"
 #include "miscadmin.h"
 #include "optimizer/clauses.h"
-#include "optimizer/var.h"
-#include "parser/parsetree.h"
 #include "parser/parse_clause.h"
+#include "parser/parsetree.h"
+#include "storage/bufmgr.h"
+#include "storage/lmgr.h"
 #include "storage/smgr.h"
+#include "tcop/utility.h"
 #include "utils/acl.h"
-#include "utils/guc.h"
 #include "utils/lsyscache.h"
 #include "utils/memutils.h"
+#include "utils/snapmgr.h"
+#include "utils/tqual.h"
 
 
-typedef struct evalPlanQual
-{
-       Index           rti;
-       EState     *estate;
-       PlanState  *planstate;
-       struct evalPlanQual *next;      /* stack of active PlanQual plans */
-       struct evalPlanQual *free;      /* list of free PlanQual plans */
-} evalPlanQual;
+/* Hooks for plugins to get control in ExecutorStart/Run/Finish/End */
+ExecutorStart_hook_type ExecutorStart_hook = NULL;
+ExecutorRun_hook_type ExecutorRun_hook = NULL;
+ExecutorFinish_hook_type ExecutorFinish_hook = NULL;
+ExecutorEnd_hook_type ExecutorEnd_hook = NULL;
+
+/* Hook for plugin to get control in ExecCheckRTPerms() */
+ExecutorCheckPerms_hook_type ExecutorCheckPerms_hook = NULL;
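For reference, a sketch of how a loadable module might install one of these hooks, following the usual save-and-chain convention. This is assumed extension code, not part of the patch; my_ExecutorStart and prev_ExecutorStart are illustrative names.

#include "postgres.h"
#include "fmgr.h"
#include "executor/executor.h"

PG_MODULE_MAGIC;

static ExecutorStart_hook_type prev_ExecutorStart = NULL;

static void
my_ExecutorStart(QueryDesc *queryDesc, int eflags)
{
    /* extension-specific setup could go here */

    /* chain to any previously installed hook, else to the standard code */
    if (prev_ExecutorStart)
        prev_ExecutorStart(queryDesc, eflags);
    else
        standard_ExecutorStart(queryDesc, eflags);
}

void
_PG_init(void)
{
    prev_ExecutorStart = ExecutorStart_hook;
    ExecutorStart_hook = my_ExecutorStart;
}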
 
 /* decls for local routines only used within this module */
 static void InitPlan(QueryDesc *queryDesc, int eflags);
-static void initResultRelInfo(ResultRelInfo *resultRelInfo,
-                                 Index resultRelationIndex,
-                                 List *rangeTable,
-                                 CmdType operation,
-                                 bool doInstrument);
-static TupleTableSlot *ExecutePlan(EState *estate, PlanState *planstate,
+static void CheckValidRowMarkRel(Relation rel, RowMarkType markType);
+static void ExecPostprocessPlan(EState *estate);
+static void ExecEndPlan(PlanState *planstate, EState *estate);
+static void ExecutePlan(EState *estate, PlanState *planstate,
                        CmdType operation,
+                       bool sendTuples,
                        long numberTuples,
                        ScanDirection direction,
                        DestReceiver *dest);
-static void ExecSelect(TupleTableSlot *slot,
-                  DestReceiver *dest,
-                  EState *estate);
-static void ExecInsert(TupleTableSlot *slot, ItemPointer tupleid,
-                  EState *estate);
-static void ExecDelete(TupleTableSlot *slot, ItemPointer tupleid,
-                  EState *estate);
-static void ExecUpdate(TupleTableSlot *slot, ItemPointer tupleid,
-                  EState *estate);
-static TupleTableSlot *EvalPlanQualNext(EState *estate);
-static void EndEvalPlanQual(EState *estate);
-static void ExecCheckRTEPerms(RangeTblEntry *rte);
-static void ExecCheckXactReadOnly(Query *parsetree);
-static void EvalPlanQualStart(evalPlanQual *epq, EState *estate,
-                                 evalPlanQual *priorepq);
-static void EvalPlanQualStop(evalPlanQual *epq);
+static bool ExecCheckRTEPerms(RangeTblEntry *rte);
+static void ExecCheckXactReadOnly(PlannedStmt *plannedstmt);
+static void EvalPlanQualStart(EPQState *epqstate, EState *parentestate,
+                                 Plan *planTree);
+static void OpenIntoRel(QueryDesc *queryDesc);
+static void CloseIntoRel(QueryDesc *queryDesc);
+static void intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo);
+static void intorel_receive(TupleTableSlot *slot, DestReceiver *self);
+static void intorel_shutdown(DestReceiver *self);
+static void intorel_destroy(DestReceiver *self);
 
 /* end of local decls */
 
@@ -102,8 +103,8 @@ static void EvalPlanQualStop(evalPlanQual *epq);
  *             This routine must be called at the beginning of any execution of any
  *             query plan
  *
- * Takes a QueryDesc previously created by CreateQueryDesc (it's not real
- * clear why we bother to separate the two functions, but...). The tupDesc
+ * Takes a QueryDesc previously created by CreateQueryDesc (which is separate
+ * only because some places use QueryDescs for utility commands).  The tupDesc
  * field of the QueryDesc is filled in to describe the tuples that will be
  * returned, and the internal fields (estate and planstate) are set up.
  *
@@ -111,11 +112,25 @@ static void EvalPlanQualStop(evalPlanQual *epq);
  *
  * NB: the CurrentMemoryContext when this is called will become the parent
  * of the per-query context used for this Executor invocation.
+ *
+ * We provide a function hook variable that lets loadable plugins
+ * get control when ExecutorStart is called.  Such a plugin would
+ * normally call standard_ExecutorStart().
+ *
  * ----------------------------------------------------------------
  */
 void
 ExecutorStart(QueryDesc *queryDesc, int eflags)
 {
+       if (ExecutorStart_hook)
+               (*ExecutorStart_hook) (queryDesc, eflags);
+       else
+               standard_ExecutorStart(queryDesc, eflags);
+}
+
+void
+standard_ExecutorStart(QueryDesc *queryDesc, int eflags)
+{
        EState     *estate;
        MemoryContext oldcontext;
 
@@ -128,7 +143,7 @@ ExecutorStart(QueryDesc *queryDesc, int eflags)
         * planned to non-temporary tables.  EXPLAIN is considered read-only.
         */
        if (XactReadOnly && !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
-               ExecCheckXactReadOnly(queryDesc->parsetree);
+               ExecCheckXactReadOnly(queryDesc->plannedstmt);
 
        /*
         * Build EState, switch into per-query memory context for startup.
@@ -139,26 +154,73 @@ ExecutorStart(QueryDesc *queryDesc, int eflags)
        oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
 
        /*
-        * Fill in parameters, if any, from queryDesc
+        * Fill in external parameters, if any, from queryDesc; and allocate
+        * workspace for internal parameters
         */
        estate->es_param_list_info = queryDesc->params;
 
-       if (queryDesc->plantree->nParamExec > 0)
+       if (queryDesc->plannedstmt->nParamExec > 0)
                estate->es_param_exec_vals = (ParamExecData *)
-                       palloc0(queryDesc->plantree->nParamExec * sizeof(ParamExecData));
+                       palloc0(queryDesc->plannedstmt->nParamExec * sizeof(ParamExecData));
+
+       /*
+        * If non-read-only query, set the command ID to mark output tuples with
+        */
+       switch (queryDesc->operation)
+       {
+               case CMD_SELECT:
+
+                       /*
+                        * SELECT INTO, SELECT FOR UPDATE/SHARE and modifying CTEs need to
+                        * mark tuples
+                        */
+                       if (queryDesc->plannedstmt->intoClause != NULL ||
+                               queryDesc->plannedstmt->rowMarks != NIL ||
+                               queryDesc->plannedstmt->hasModifyingCTE)
+                               estate->es_output_cid = GetCurrentCommandId(true);
+
+                       /*
+                        * A SELECT without modifying CTEs can't possibly queue triggers,
+                        * so force skip-triggers mode. This is just a marginal efficiency
+                        * hack, since AfterTriggerBeginQuery/AfterTriggerEndQuery aren't
+                        * all that expensive, but we might as well do it.
+                        */
+                       if (!queryDesc->plannedstmt->hasModifyingCTE)
+                               eflags |= EXEC_FLAG_SKIP_TRIGGERS;
+                       break;
+
+               case CMD_INSERT:
+               case CMD_DELETE:
+               case CMD_UPDATE:
+                       estate->es_output_cid = GetCurrentCommandId(true);
+                       break;
+
+               default:
+                       elog(ERROR, "unrecognized operation code: %d",
+                                (int) queryDesc->operation);
+                       break;
+       }
 
        /*
         * Copy other important information into the EState
         */
-       estate->es_snapshot = queryDesc->snapshot;
-       estate->es_crosscheck_snapshot = queryDesc->crosscheck_snapshot;
-       estate->es_instrument = queryDesc->doInstrument;
+       estate->es_snapshot = RegisterSnapshot(queryDesc->snapshot);
+       estate->es_crosscheck_snapshot = RegisterSnapshot(queryDesc->crosscheck_snapshot);
+       estate->es_top_eflags = eflags;
+       estate->es_instrument = queryDesc->instrument_options;
 
        /*
         * Initialize the plan state tree
         */
        InitPlan(queryDesc, eflags);
 
+       /*
+        * Set up an AFTER-trigger statement context, unless told not to, or
+        * unless it's EXPLAIN-only mode (when ExecutorFinish won't be called).
+        */
+       if (!(eflags & (EXEC_FLAG_SKIP_TRIGGERS | EXEC_FLAG_EXPLAIN_ONLY)))
+               AfterTriggerBeginQuery();
+
        MemoryContextSwitchTo(oldcontext);
 }
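Per the header comment, EXPLAIN (without ANALYZE) is the one caller that may skip ExecutorRun and ExecutorFinish entirely; a sketch of that usage, with the flag as declared in executor/executor.h:

/* EXPLAIN-only sketch: start the executor just to build the plan tree. */
static void
explain_only_sketch(QueryDesc *queryDesc)
{
    ExecutorStart(queryDesc, EXEC_FLAG_EXPLAIN_ONLY);
    /* ... walk queryDesc->planstate to produce EXPLAIN output ... */
    ExecutorEnd(queryDesc);     /* no ExecutorRun/ExecutorFinish in this mode */
}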
 
@@ -178,16 +240,35 @@ ExecutorStart(QueryDesc *queryDesc, int eflags)
  *             Note: count = 0 is interpreted as no portal limit, i.e., run to
  *             completion.
  *
+ *             There is no return value, but output tuples (if any) are sent to
+ *             the destination receiver specified in the QueryDesc; and the number
+ *             of tuples processed at the top level can be found in
+ *             estate->es_processed.
+ *
+ *             We provide a function hook variable that lets loadable plugins
+ *             get control when ExecutorRun is called.  Such a plugin would
+ *             normally call standard_ExecutorRun().
+ *
  * ----------------------------------------------------------------
  */
-TupleTableSlot *
+void
 ExecutorRun(QueryDesc *queryDesc,
                        ScanDirection direction, long count)
 {
+       if (ExecutorRun_hook)
+               (*ExecutorRun_hook) (queryDesc, direction, count);
+       else
+               standard_ExecutorRun(queryDesc, direction, count);
+}
+
+void
+standard_ExecutorRun(QueryDesc *queryDesc,
+                                        ScanDirection direction, long count)
+{
        EState     *estate;
        CmdType         operation;
        DestReceiver *dest;
-       TupleTableSlot *result;
+       bool            sendTuples;
        MemoryContext oldcontext;
 
        /* sanity checks */
@@ -196,12 +277,17 @@ ExecutorRun(QueryDesc *queryDesc,
        estate = queryDesc->estate;
 
        Assert(estate != NULL);
+       Assert(!(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));
 
        /*
         * Switch into per-query memory context
         */
        oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
 
+       /* Allow instrumentation of Executor overall runtime */
+       if (queryDesc->totaltime)
+               InstrStartNode(queryDesc->totaltime);
+
        /*
         * extract information from the query descriptor and the query feature.
         */
@@ -209,34 +295,101 @@ ExecutorRun(QueryDesc *queryDesc,
        dest = queryDesc->dest;
 
        /*
-        * startup tuple receiver
+        * startup tuple receiver, if we will be emitting tuples
         */
        estate->es_processed = 0;
        estate->es_lastoid = InvalidOid;
 
-       (*dest->rStartup) (dest, operation, queryDesc->tupDesc);
+       sendTuples = (operation == CMD_SELECT ||
+                                 queryDesc->plannedstmt->hasReturning);
+
+       if (sendTuples)
+               (*dest->rStartup) (dest, operation, queryDesc->tupDesc);
 
        /*
         * run plan
         */
-       if (ScanDirectionIsNoMovement(direction))
-               result = NULL;
-       else
-               result = ExecutePlan(estate,
-                                                        queryDesc->planstate,
-                                                        operation,
-                                                        count,
-                                                        direction,
-                                                        dest);
+       if (!ScanDirectionIsNoMovement(direction))
+               ExecutePlan(estate,
+                                       queryDesc->planstate,
+                                       operation,
+                                       sendTuples,
+                                       count,
+                                       direction,
+                                       dest);
 
        /*
-        * shutdown receiver
+        * shutdown tuple receiver, if we started it
         */
-       (*dest->rShutdown) (dest);
+       if (sendTuples)
+               (*dest->rShutdown) (dest);
+
+       if (queryDesc->totaltime)
+               InstrStopNode(queryDesc->totaltime, estate->es_processed);
 
        MemoryContextSwitchTo(oldcontext);
+}
 
-       return result;
+/* ----------------------------------------------------------------
+ *             ExecutorFinish
+ *
+ *             This routine must be called after the last ExecutorRun call.
+ *             It performs cleanup such as firing AFTER triggers.      It is
+ *             separate from ExecutorEnd because EXPLAIN ANALYZE needs to
+ *             include these actions in the total runtime.
+ *
+ *             We provide a function hook variable that lets loadable plugins
+ *             get control when ExecutorFinish is called.      Such a plugin would
+ *             normally call standard_ExecutorFinish().
+ *
+ * ----------------------------------------------------------------
+ */
+void
+ExecutorFinish(QueryDesc *queryDesc)
+{
+       if (ExecutorFinish_hook)
+               (*ExecutorFinish_hook) (queryDesc);
+       else
+               standard_ExecutorFinish(queryDesc);
+}
+
+void
+standard_ExecutorFinish(QueryDesc *queryDesc)
+{
+       EState     *estate;
+       MemoryContext oldcontext;
+
+       /* sanity checks */
+       Assert(queryDesc != NULL);
+
+       estate = queryDesc->estate;
+
+       Assert(estate != NULL);
+       Assert(!(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));
+
+       /* This should be run once and only once per Executor instance */
+       Assert(!estate->es_finished);
+
+       /* Switch into per-query memory context */
+       oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
+
+       /* Allow instrumentation of Executor overall runtime */
+       if (queryDesc->totaltime)
+               InstrStartNode(queryDesc->totaltime);
+
+       /* Run ModifyTable nodes to completion */
+       ExecPostprocessPlan(estate);
+
+       /* Execute queued AFTER triggers, unless told not to */
+       if (!(estate->es_top_eflags & EXEC_FLAG_SKIP_TRIGGERS))
+               AfterTriggerEndQuery(estate);
+
+       if (queryDesc->totaltime)
+               InstrStopNode(queryDesc->totaltime, 0);
+
+       MemoryContextSwitchTo(oldcontext);
+
+       estate->es_finished = true;
 }
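A sketch of the batched-fetch pattern the comments above allow: ExecutorRun may be called repeatedly with a nonzero count, and ExecutorFinish exactly once after the final call. The stopping test on es_processed is illustrative; real callers normally track this through their DestReceiver or portal state.

/* Fetch tuples in batches of 100 until the plan is exhausted (sketch). */
static void
fetch_in_batches_sketch(QueryDesc *queryDesc)
{
    ExecutorStart(queryDesc, 0);

    for (;;)
    {
        ExecutorRun(queryDesc, ForwardScanDirection, 100L);
        if (queryDesc->estate->es_processed < 100)
            break;              /* fewer than requested: plan is done */
        /* a SELECT may also legally stop short right here */
    }

    ExecutorFinish(queryDesc);  /* only after the final ExecutorRun */
    ExecutorEnd(queryDesc);
}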
 
 /* ----------------------------------------------------------------
@@ -244,11 +397,25 @@ ExecutorRun(QueryDesc *queryDesc,
  *
  *             This routine must be called at the end of execution of any
  *             query plan
+ *
+ *             We provide a function hook variable that lets loadable plugins
+ *             get control when ExecutorEnd is called.  Such a plugin would
+ *             normally call standard_ExecutorEnd().
+ *
  * ----------------------------------------------------------------
  */
 void
 ExecutorEnd(QueryDesc *queryDesc)
 {
+       if (ExecutorEnd_hook)
+               (*ExecutorEnd_hook) (queryDesc);
+       else
+               standard_ExecutorEnd(queryDesc);
+}
+
+void
+standard_ExecutorEnd(QueryDesc *queryDesc)
+{
        EState     *estate;
        MemoryContext oldcontext;
 
@@ -260,6 +427,14 @@ ExecutorEnd(QueryDesc *queryDesc)
        Assert(estate != NULL);
 
        /*
+        * Check that ExecutorFinish was called, unless in EXPLAIN-only mode. This
+        * Assert is needed because ExecutorFinish is new as of 9.1, and callers
+        * might forget to call it.
+        */
+       Assert(estate->es_finished ||
+                  (estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));
+
+       /*
         * Switch into per-query memory context to run ExecEndPlan
         */
        oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
@@ -267,6 +442,16 @@ ExecutorEnd(QueryDesc *queryDesc)
        ExecEndPlan(queryDesc->planstate, estate);
 
        /*
+        * Close the SELECT INTO relation if any
+        */
+       if (estate->es_select_into)
+               CloseIntoRel(queryDesc);
+
+       /* do away with our snapshots */
+       UnregisterSnapshot(estate->es_snapshot);
+       UnregisterSnapshot(estate->es_crosscheck_snapshot);
+
+       /*
         * Must switch out of context before destroying it
         */
        MemoryContextSwitchTo(oldcontext);
@@ -281,6 +466,7 @@ ExecutorEnd(QueryDesc *queryDesc)
        queryDesc->tupDesc = NULL;
        queryDesc->estate = NULL;
        queryDesc->planstate = NULL;
+       queryDesc->totaltime = NULL;
 }
 
 /* ----------------------------------------------------------------
@@ -314,7 +500,7 @@ ExecutorRewind(QueryDesc *queryDesc)
        /*
         * rescan plan
         */
-       ExecReScan(queryDesc->planstate, NULL);
+       ExecReScan(queryDesc->planstate);
 
        MemoryContextSwitchTo(oldcontext);
 }
@@ -323,48 +509,66 @@ ExecutorRewind(QueryDesc *queryDesc)
 /*
  * ExecCheckRTPerms
  *             Check access permissions for all relations listed in a range table.
+ *
+ * Returns true if permissions are adequate.  Otherwise, throws an appropriate
+ * error if ereport_on_violation is true, or simply returns false otherwise.
  */
-void
-ExecCheckRTPerms(List *rangeTable)
+bool
+ExecCheckRTPerms(List *rangeTable, bool ereport_on_violation)
 {
        ListCell   *l;
+       bool            result = true;
 
        foreach(l, rangeTable)
        {
-               RangeTblEntry *rte = lfirst(l);
+               RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
 
-               ExecCheckRTEPerms(rte);
+               result = ExecCheckRTEPerms(rte);
+               if (!result)
+               {
+                       Assert(rte->rtekind == RTE_RELATION);
+                       if (ereport_on_violation)
+                               aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
+                                                          get_rel_name(rte->relid));
+                       return false;
+               }
        }
+
+       if (ExecutorCheckPerms_hook)
+               result = (*ExecutorCheckPerms_hook) (rangeTable,
+                                                                                        ereport_on_violation);
+       return result;
 }
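A sketch of how an extension might use ExecutorCheckPerms_hook to veto access to a particular relation. This is assumed extension code: my_forbidden_relid is a hypothetical OID the module would determine itself, and a production module should also save and chain any previously installed hook.

#include "postgres.h"
#include "executor/executor.h"
#include "nodes/parsenodes.h"
#include "utils/acl.h"
#include "utils/lsyscache.h"

static Oid my_forbidden_relid = InvalidOid;   /* hypothetical, set elsewhere */

static bool
my_ExecCheckRTPerms(List *rangeTable, bool ereport_on_violation)
{
    ListCell   *l;

    foreach(l, rangeTable)
    {
        RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);

        if (rte->rtekind == RTE_RELATION &&
            rte->relid == my_forbidden_relid)
        {
            if (ereport_on_violation)
                aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
                               get_rel_name(rte->relid));
            return false;
        }
    }
    return true;
}

/* in _PG_init():  ExecutorCheckPerms_hook = my_ExecCheckRTPerms; */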
 
 /*
  * ExecCheckRTEPerms
  *             Check access permissions for a single RTE.
  */
-static void
+static bool
 ExecCheckRTEPerms(RangeTblEntry *rte)
 {
        AclMode         requiredPerms;
+       AclMode         relPerms;
+       AclMode         remainingPerms;
        Oid                     relOid;
        Oid                     userid;
+       Bitmapset  *tmpset;
+       int                     col;
 
        /*
-        * Only plain-relation RTEs need to be checked here.  Subquery RTEs are
-        * checked by ExecInitSubqueryScan if the subquery is still a separate
-        * subquery --- if it's been pulled up into our query level then the RTEs
-        * are in our rangetable and will be checked here. Function RTEs are
+        * Only plain-relation RTEs need to be checked here.  Function RTEs are
         * checked by init_fcache when the function is prepared for execution.
-        * Join and special RTEs need no checks.
+        * Join, subquery, and special RTEs need no checks.
         */
        if (rte->rtekind != RTE_RELATION)
-               return;
+               return true;
 
        /*
         * No work if requiredPerms is empty.
         */
        requiredPerms = rte->requiredPerms;
        if (requiredPerms == 0)
-               return;
+               return true;
 
        relOid = rte->relid;
 
@@ -379,40 +583,132 @@ ExecCheckRTEPerms(RangeTblEntry *rte)
        userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
 
        /*
-        * We must have *all* the requiredPerms bits, so use aclmask not aclcheck.
+        * We must have *all* the requiredPerms bits, but some of the bits can be
+        * satisfied from column-level rather than relation-level permissions.
+        * First, remove any bits that are satisfied by relation permissions.
         */
-       if (pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL)
-               != requiredPerms)
-               aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
-                                          get_rel_name(relOid));
+       relPerms = pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL);
+       remainingPerms = requiredPerms & ~relPerms;
+       if (remainingPerms != 0)
+       {
+               /*
+                * If we lack any permissions that exist only as relation permissions,
+                * we can fail straight away.
+                */
+               if (remainingPerms & ~(ACL_SELECT | ACL_INSERT | ACL_UPDATE))
+                       return false;
+
+               /*
+                * Check to see if we have the needed privileges at column level.
+                *
+                * Note: failures just report a table-level error; it would be nicer
+                * to report a column-level error if we have some but not all of the
+                * column privileges.
+                */
+               if (remainingPerms & ACL_SELECT)
+               {
+                       /*
+                        * When the query doesn't explicitly reference any columns (for
+                        * example, SELECT COUNT(*) FROM table), allow the query if we
+                        * have SELECT on any column of the rel, as per SQL spec.
+                        */
+                       if (bms_is_empty(rte->selectedCols))
+                       {
+                               if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
+                                                                                         ACLMASK_ANY) != ACLCHECK_OK)
+                                       return false;
+                       }
+
+                       tmpset = bms_copy(rte->selectedCols);
+                       while ((col = bms_first_member(tmpset)) >= 0)
+                       {
+                               /* remove the column number offset */
+                               col += FirstLowInvalidHeapAttributeNumber;
+                               if (col == InvalidAttrNumber)
+                               {
+                                       /* Whole-row reference, must have priv on all cols */
+                                       if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
+                                                                                                 ACLMASK_ALL) != ACLCHECK_OK)
+                                               return false;
+                               }
+                               else
+                               {
+                                       if (pg_attribute_aclcheck(relOid, col, userid,
+                                                                                         ACL_SELECT) != ACLCHECK_OK)
+                                               return false;
+                               }
+                       }
+                       bms_free(tmpset);
+               }
+
+               /*
+                * Basically the same for the mod columns, with either INSERT or
+                * UPDATE privilege as specified by remainingPerms.
+                */
+               remainingPerms &= ~ACL_SELECT;
+               if (remainingPerms != 0)
+               {
+                       /*
+                        * When the query doesn't explicitly change any columns, allow the
+                        * query if we have permission on any column of the rel.  This is
+                        * to handle SELECT FOR UPDATE as well as possible corner cases in
+                        * INSERT and UPDATE.
+                        */
+                       if (bms_is_empty(rte->modifiedCols))
+                       {
+                               if (pg_attribute_aclcheck_all(relOid, userid, remainingPerms,
+                                                                                         ACLMASK_ANY) != ACLCHECK_OK)
+                                       return false;
+                       }
+
+                       tmpset = bms_copy(rte->modifiedCols);
+                       while ((col = bms_first_member(tmpset)) >= 0)
+                       {
+                               /* remove the column number offset */
+                               col += FirstLowInvalidHeapAttributeNumber;
+                               if (col == InvalidAttrNumber)
+                               {
+                                       /* whole-row reference can't happen here */
+                                       elog(ERROR, "whole-row update is not implemented");
+                               }
+                               else
+                               {
+                                       if (pg_attribute_aclcheck(relOid, col, userid,
+                                                                                         remainingPerms) != ACLCHECK_OK)
+                                               return false;
+                               }
+                       }
+                       bms_free(tmpset);
+               }
+       }
+       return true;
 }
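As an aside on the bookkeeping used above: selectedCols/modifiedCols store attribute numbers offset by FirstLowInvalidHeapAttributeNumber, so system columns and whole-row references (attnum 0) fit in the same bitmapset. A membership test therefore looks roughly like this hypothetical helper:

#include "postgres.h"
#include "access/sysattr.h"
#include "nodes/parsenodes.h"

/* Does this RTE require SELECT privilege on the given attribute number? */
static bool
rte_selects_column_sketch(RangeTblEntry *rte, AttrNumber attnum)
{
    return bms_is_member(attnum - FirstLowInvalidHeapAttributeNumber,
                         rte->selectedCols);
}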
 
 /*
  * Check that the query does not imply any writes to non-temp tables.
+ *
+ * Note: in a Hot Standby slave this would need to reject writes to temp
+ * tables as well; but an HS slave can't have created any temp tables
+ * in the first place, so no need to check that.
  */
 static void
-ExecCheckXactReadOnly(Query *parsetree)
+ExecCheckXactReadOnly(PlannedStmt *plannedstmt)
 {
        ListCell   *l;
 
        /*
         * CREATE TABLE AS or SELECT INTO?
         *
-        * XXX should we allow this if the destination is temp?
+        * XXX should we allow this if the destination is temp?  Considering that
+        * it would still require catalog changes, probably not.
         */
-       if (parsetree->into != NULL)
-               goto fail;
+       if (plannedstmt->intoClause != NULL)
+               PreventCommandIfReadOnly(CreateCommandTag((Node *) plannedstmt));
 
        /* Fail if write permissions are requested on any non-temp table */
-       foreach(l, parsetree->rtable)
+       foreach(l, plannedstmt->rtable)
        {
-               RangeTblEntry *rte = lfirst(l);
-
-               if (rte->rtekind == RTE_SUBQUERY)
-               {
-                       ExecCheckXactReadOnly(rte->subquery);
-                       continue;
-               }
+               RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
 
                if (rte->rtekind != RTE_RELATION)
                        continue;
@@ -423,15 +719,8 @@ ExecCheckXactReadOnly(Query *parsetree)
                if (isTempNamespace(get_rel_namespace(rte->relid)))
                        continue;
 
-               goto fail;
+               PreventCommandIfReadOnly(CreateCommandTag((Node *) plannedstmt));
        }
-
-       return;
-
-fail:
-       ereport(ERROR,
-                       (errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION),
-                        errmsg("transaction is read-only")));
 }
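The removed goto/ereport is replaced by PreventCommandIfReadOnly(), which lives in access/transam/xact.c and names the offending command via CreateCommandTag(). A simplified sketch of what that call is expected to do, reconstructed from the removed code plus the command name (an assumption, not a copy of the real function):

#include "postgres.h"
#include "access/xact.h"

/* Simplified sketch of PreventCommandIfReadOnly()'s behavior. */
static void
prevent_if_read_only_sketch(const char *cmdname)
{
    if (XactReadOnly)
        ereport(ERROR,
                (errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION),
                 errmsg("cannot execute %s in a read-only transaction",
                        cmdname)));
}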
 
 
@@ -446,82 +735,60 @@ static void
 InitPlan(QueryDesc *queryDesc, int eflags)
 {
        CmdType         operation = queryDesc->operation;
-       Query      *parseTree = queryDesc->parsetree;
-       Plan       *plan = queryDesc->plantree;
+       PlannedStmt *plannedstmt = queryDesc->plannedstmt;
+       Plan       *plan = plannedstmt->planTree;
+       List       *rangeTable = plannedstmt->rtable;
        EState     *estate = queryDesc->estate;
        PlanState  *planstate;
-       List       *rangeTable;
-       Relation        intoRelationDesc;
-       bool            do_select_into;
        TupleDesc       tupType;
        ListCell   *l;
+       int                     i;
 
        /*
-        * Do permissions checks.  It's sufficient to examine the query's top
-        * rangetable here --- subplan RTEs will be checked during
-        * ExecInitSubPlan().
-        */
-       ExecCheckRTPerms(parseTree->rtable);
-
-       /*
-        * get information from query descriptor
+        * Do permissions checks
         */
-       rangeTable = parseTree->rtable;
+       ExecCheckRTPerms(rangeTable, true);
 
        /*
         * initialize the node's execution state
         */
        estate->es_range_table = rangeTable;
+       estate->es_plannedstmt = plannedstmt;
 
        /*
-        * if there is a result relation, initialize result relation stuff
+        * initialize result relation stuff, and open/lock the result rels.
+        *
+        * We must do this before initializing the plan tree, else we might try to
+        * do a lock upgrade if a result rel is also a source rel.
         */
-       if (parseTree->resultRelation != 0 && operation != CMD_SELECT)
+       if (plannedstmt->resultRelations)
        {
-               List       *resultRelations = parseTree->resultRelations;
-               int                     numResultRelations;
+               List       *resultRelations = plannedstmt->resultRelations;
+               int                     numResultRelations = list_length(resultRelations);
                ResultRelInfo *resultRelInfos;
+               ResultRelInfo *resultRelInfo;
 
-               if (resultRelations != NIL)
+               resultRelInfos = (ResultRelInfo *)
+                       palloc(numResultRelations * sizeof(ResultRelInfo));
+               resultRelInfo = resultRelInfos;
+               foreach(l, resultRelations)
                {
-                       /*
-                        * Multiple result relations (due to inheritance)
-                        * parseTree->resultRelations identifies them all
-                        */
-                       ResultRelInfo *resultRelInfo;
-
-                       numResultRelations = list_length(resultRelations);
-                       resultRelInfos = (ResultRelInfo *)
-                               palloc(numResultRelations * sizeof(ResultRelInfo));
-                       resultRelInfo = resultRelInfos;
-                       foreach(l, resultRelations)
-                       {
-                               initResultRelInfo(resultRelInfo,
-                                                                 lfirst_int(l),
-                                                                 rangeTable,
-                                                                 operation,
-                                                                 estate->es_instrument);
-                               resultRelInfo++;
-                       }
-               }
-               else
-               {
-                       /*
-                        * Single result relation identified by parseTree->resultRelation
-                        */
-                       numResultRelations = 1;
-                       resultRelInfos = (ResultRelInfo *) palloc(sizeof(ResultRelInfo));
-                       initResultRelInfo(resultRelInfos,
-                                                         parseTree->resultRelation,
-                                                         rangeTable,
-                                                         operation,
+                       Index           resultRelationIndex = lfirst_int(l);
+                       Oid                     resultRelationOid;
+                       Relation        resultRelation;
+
+                       resultRelationOid = getrelid(resultRelationIndex, rangeTable);
+                       resultRelation = heap_open(resultRelationOid, RowExclusiveLock);
+                       InitResultRelInfo(resultRelInfo,
+                                                         resultRelation,
+                                                         resultRelationIndex,
                                                          estate->es_instrument);
+                       resultRelInfo++;
                }
-
                estate->es_result_relations = resultRelInfos;
                estate->es_num_result_relations = numResultRelations;
-               /* Initialize to first or only result rel */
-               estate->es_result_relation_info = resultRelInfos;
+               /* es_result_relation_info is NULL except when within ModifyTable */
+               estate->es_result_relation_info = NULL;
        }
        else
        {
@@ -534,73 +801,114 @@ InitPlan(QueryDesc *queryDesc, int eflags)
        }
 
        /*
-        * Detect whether we're doing SELECT INTO.  If so, set the es_into_oids
-        * flag appropriately so that the plan tree will be initialized with the
-        * correct tuple descriptors.
-        */
-       do_select_into = false;
-
-       if (operation == CMD_SELECT && parseTree->into != NULL)
-       {
-               do_select_into = true;
-               estate->es_select_into = true;
-               estate->es_into_oids = interpretOidsOption(parseTree->intoOptions);
-       }
-
-       /*
-        * Have to lock relations selected FOR UPDATE/FOR SHARE
+        * Similarly, we have to lock relations selected FOR UPDATE/FOR SHARE
+        * before we initialize the plan tree, else we'd be risking lock upgrades.
+        * While we are at it, build the ExecRowMark list.
         */
        estate->es_rowMarks = NIL;
-       foreach(l, parseTree->rowMarks)
+       foreach(l, plannedstmt->rowMarks)
        {
-               RowMarkClause *rc = (RowMarkClause *) lfirst(l);
-               Oid                     relid = getrelid(rc->rti, rangeTable);
+               PlanRowMark *rc = (PlanRowMark *) lfirst(l);
+               Oid                     relid;
                Relation        relation;
                ExecRowMark *erm;
 
-               relation = heap_open(relid, RowShareLock);
+               /* ignore "parent" rowmarks; they are irrelevant at runtime */
+               if (rc->isParent)
+                       continue;
+
+               switch (rc->markType)
+               {
+                       case ROW_MARK_EXCLUSIVE:
+                       case ROW_MARK_SHARE:
+                               relid = getrelid(rc->rti, rangeTable);
+                               relation = heap_open(relid, RowShareLock);
+                               break;
+                       case ROW_MARK_REFERENCE:
+                               relid = getrelid(rc->rti, rangeTable);
+                               relation = heap_open(relid, AccessShareLock);
+                               break;
+                       case ROW_MARK_COPY:
+                               /* there's no real table here ... */
+                               relation = NULL;
+                               break;
+                       default:
+                               elog(ERROR, "unrecognized markType: %d", rc->markType);
+                               relation = NULL;        /* keep compiler quiet */
+                               break;
+               }
+
+               /* Check that relation is a legal target for marking */
+               if (relation)
+                       CheckValidRowMarkRel(relation, rc->markType);
+
                erm = (ExecRowMark *) palloc(sizeof(ExecRowMark));
                erm->relation = relation;
                erm->rti = rc->rti;
-               erm->forUpdate = rc->forUpdate;
+               erm->prti = rc->prti;
+               erm->rowmarkId = rc->rowmarkId;
+               erm->markType = rc->markType;
                erm->noWait = rc->noWait;
-               snprintf(erm->resname, sizeof(erm->resname), "ctid%u", rc->rti);
+               ItemPointerSetInvalid(&(erm->curCtid));
                estate->es_rowMarks = lappend(estate->es_rowMarks, erm);
        }
 
        /*
-        * initialize the executor "tuple" table.  We need slots for all the plan
-        * nodes, plus possibly output slots for the junkfilter(s). At this point
-        * we aren't sure if we need junkfilters, so just add slots for them
-        * unconditionally.  Also, if it's not a SELECT, set up a slot for use for
-        * trigger output tuples.
+        * Detect whether we're doing SELECT INTO.  If so, set the es_into_oids
+        * flag appropriately so that the plan tree will be initialized with the
+        * correct tuple descriptors.  (Other SELECT INTO stuff comes later.)
         */
+       estate->es_select_into = false;
+       if (operation == CMD_SELECT && plannedstmt->intoClause != NULL)
        {
-               int                     nSlots = ExecCountSlotsNode(plan);
+               estate->es_select_into = true;
+               estate->es_into_oids = interpretOidsOption(plannedstmt->intoClause->options);
+       }
 
-               if (parseTree->resultRelations != NIL)
-                       nSlots += list_length(parseTree->resultRelations);
-               else
-                       nSlots += 1;
-               if (operation != CMD_SELECT)
-                       nSlots++;
+       /*
+        * Initialize the executor's tuple table to empty.
+        */
+       estate->es_tupleTable = NIL;
+       estate->es_trig_tuple_slot = NULL;
+       estate->es_trig_oldtup_slot = NULL;
+
+       /* mark EvalPlanQual not active */
+       estate->es_epqTuple = NULL;
+       estate->es_epqTupleSet = NULL;
+       estate->es_epqScanDone = NULL;
 
-               estate->es_tupleTable = ExecCreateTupleTable(nSlots);
+       /*
+        * Initialize private state information for each SubPlan.  We must do this
+        * before running ExecInitNode on the main query tree, since
+        * ExecInitSubPlan expects to be able to find these entries.
+        */
+       Assert(estate->es_subplanstates == NIL);
+       i = 1;                                          /* subplan indices count from 1 */
+       foreach(l, plannedstmt->subplans)
+       {
+               Plan       *subplan = (Plan *) lfirst(l);
+               PlanState  *subplanstate;
+               int                     sp_eflags;
 
-               if (operation != CMD_SELECT)
-                       estate->es_trig_tuple_slot =
-                               ExecAllocTableSlot(estate->es_tupleTable);
-       }
+               /*
+                * A subplan will never need to do BACKWARD scan nor MARK/RESTORE. If
+                * it is a parameterless subplan (not initplan), we suggest that it be
+                * prepared to handle REWIND efficiently; otherwise there is no need.
+                */
+               sp_eflags = eflags & EXEC_FLAG_EXPLAIN_ONLY;
+               if (bms_is_member(i, plannedstmt->rewindPlanIDs))
+                       sp_eflags |= EXEC_FLAG_REWIND;
 
-       /* mark EvalPlanQual not active */
-       estate->es_topPlan = plan;
-       estate->es_evalPlanQual = NULL;
-       estate->es_evTupleNull = NULL;
-       estate->es_evTuple = NULL;
-       estate->es_useEvalPlan = false;
+               subplanstate = ExecInitNode(subplan, estate, sp_eflags);
+
+               estate->es_subplanstates = lappend(estate->es_subplanstates,
+                                                                                  subplanstate);
+
+               i++;
+       }
 
        /*
-        * initialize the private state information for all the nodes in the query
+        * Initialize the private state information for all the nodes in the query
         * tree.  This opens files, allocates storage and leaves us ready to start
         * processing tuples.
         */
@@ -614,106 +922,42 @@ InitPlan(QueryDesc *queryDesc, int eflags)
        tupType = ExecGetResultType(planstate);
 
        /*
-        * Initialize the junk filter if needed.  SELECT and INSERT queries need a
-        * filter if there are any junk attrs in the tlist.  INSERT and SELECT
-        * INTO also need a filter if the plan may return raw disk tuples (else
-        * heap_insert will be scribbling on the source relation!). UPDATE and
-        * DELETE always need a filter, since there's always a junk 'ctid'
-        * attribute present --- no need to look first.
+        * Initialize the junk filter if needed.  SELECT queries need a filter if
+        * there are any junk attrs in the top-level tlist.
         */
+       if (operation == CMD_SELECT)
        {
                bool            junk_filter_needed = false;
                ListCell   *tlist;
 
-               switch (operation)
+               foreach(tlist, plan->targetlist)
                {
-                       case CMD_SELECT:
-                       case CMD_INSERT:
-                               foreach(tlist, plan->targetlist)
-                               {
-                                       TargetEntry *tle = (TargetEntry *) lfirst(tlist);
+                       TargetEntry *tle = (TargetEntry *) lfirst(tlist);
 
-                                       if (tle->resjunk)
-                                       {
-                                               junk_filter_needed = true;
-                                               break;
-                                       }
-                               }
-                               if (!junk_filter_needed &&
-                                       (operation == CMD_INSERT || do_select_into) &&
-                                       ExecMayReturnRawTuples(planstate))
-                                       junk_filter_needed = true;
-                               break;
-                       case CMD_UPDATE:
-                       case CMD_DELETE:
+                       if (tle->resjunk)
+                       {
                                junk_filter_needed = true;
                                break;
-                       default:
-                               break;
+                       }
                }
 
                if (junk_filter_needed)
                {
-                       /*
-                        * If there are multiple result relations, each one needs its own
-                        * junk filter.  Note this is only possible for UPDATE/DELETE, so
-                        * we can't be fooled by some needing a filter and some not.
-                        */
-                       if (parseTree->resultRelations != NIL)
-                       {
-                               PlanState **appendplans;
-                               int                     as_nplans;
-                               ResultRelInfo *resultRelInfo;
-                               int                     i;
-
-                               /* Top plan had better be an Append here. */
-                               Assert(IsA(plan, Append));
-                               Assert(((Append *) plan)->isTarget);
-                               Assert(IsA(planstate, AppendState));
-                               appendplans = ((AppendState *) planstate)->appendplans;
-                               as_nplans = ((AppendState *) planstate)->as_nplans;
-                               Assert(as_nplans == estate->es_num_result_relations);
-                               resultRelInfo = estate->es_result_relations;
-                               for (i = 0; i < as_nplans; i++)
-                               {
-                                       PlanState  *subplan = appendplans[i];
-                                       JunkFilter *j;
-
-                                       j = ExecInitJunkFilter(subplan->plan->targetlist,
-                                                       resultRelInfo->ri_RelationDesc->rd_att->tdhasoid,
-                                                                 ExecAllocTableSlot(estate->es_tupleTable));
-                                       resultRelInfo->ri_junkFilter = j;
-                                       resultRelInfo++;
-                               }
+                       JunkFilter *j;
 
-                               /*
-                                * Set active junkfilter too; at this point ExecInitAppend has
-                                * already selected an active result relation...
-                                */
-                               estate->es_junkFilter =
-                                       estate->es_result_relation_info->ri_junkFilter;
-                       }
-                       else
-                       {
-                               /* Normal case with just one JunkFilter */
-                               JunkFilter *j;
-
-                               j = ExecInitJunkFilter(planstate->plan->targetlist,
-                                                                          tupType->tdhasoid,
-                                                                 ExecAllocTableSlot(estate->es_tupleTable));
-                               estate->es_junkFilter = j;
-                               if (estate->es_result_relation_info)
-                                       estate->es_result_relation_info->ri_junkFilter = j;
-
-                               /* For SELECT, want to return the cleaned tuple type */
-                               if (operation == CMD_SELECT)
-                                       tupType = j->jf_cleanTupType;
-                       }
+                       j = ExecInitJunkFilter(planstate->plan->targetlist,
+                                                                  tupType->tdhasoid,
+                                                                  ExecInitExtraTupleSlot(estate));
+                       estate->es_junkFilter = j;
+
+                       /* Want to return the cleaned tuple type */
+                       tupType = j->jf_cleanTupType;
                }
-               else
-                       estate->es_junkFilter = NULL;
        }
 
+       queryDesc->tupDesc = tupType;
+       queryDesc->planstate = planstate;
+
        /*
         * If doing SELECT INTO, initialize the "into" relation.  We must wait
         * till now so we have the "clean" result tuple type to create the new
@@ -721,174 +965,153 @@ InitPlan(QueryDesc *queryDesc, int eflags)
         *
         * If EXPLAIN, skip creating the "into" relation.
         */
-       intoRelationDesc = NULL;
+       if (estate->es_select_into && !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
+               OpenIntoRel(queryDesc);
+}
 
-       if (do_select_into && !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
-       {
-               char       *intoName;
-               Oid                     namespaceId;
-               Oid                     tablespaceId;
-               Datum           reloptions;
-               AclResult       aclresult;
-               Oid                     intoRelationId;
-               TupleDesc       tupdesc;
+/*
+ * Check that a proposed result relation is a legal target for the operation
+ *
+ * In most cases parser and/or planner should have noticed this already, but
+ * let's make sure.  In the view case we do need a test here, because if the
+ * view wasn't rewritten by a rule, it had better have an INSTEAD trigger.
+ *
+ * Note: when changing this function, you probably also need to look at
+ * CheckValidRowMarkRel.
+ */
+void
+CheckValidResultRel(Relation resultRel, CmdType operation)
+{
+       TriggerDesc *trigDesc = resultRel->trigdesc;
 
-               /*
-                * Check consistency of arguments
-                */
-               if (parseTree->intoOnCommit != ONCOMMIT_NOOP && !parseTree->into->istemp)
+       switch (resultRel->rd_rel->relkind)
+       {
+               case RELKIND_RELATION:
+                       /* OK */
+                       break;
+               case RELKIND_SEQUENCE:
                        ereport(ERROR,
-                                       (errcode(ERRCODE_INVALID_TABLE_DEFINITION),
-                                        errmsg("ON COMMIT can only be used on temporary tables")));
-
-               /*
-                * find namespace to create in, check permissions
-                */
-               intoName = parseTree->into->relname;
-               namespaceId = RangeVarGetCreationNamespace(parseTree->into);
-
-               aclresult = pg_namespace_aclcheck(namespaceId, GetUserId(),
-                                                                                 ACL_CREATE);
-               if (aclresult != ACLCHECK_OK)
-                       aclcheck_error(aclresult, ACL_KIND_NAMESPACE,
-                                                  get_namespace_name(namespaceId));
-
-               /*
-                * Select tablespace to use.  If not specified, use default_tablespace
-                * (which may in turn default to database's default).
-                */
-               if (parseTree->intoTableSpaceName)
-               {
-                       tablespaceId = get_tablespace_oid(parseTree->intoTableSpaceName);
-                       if (!OidIsValid(tablespaceId))
-                               ereport(ERROR,
-                                               (errcode(ERRCODE_UNDEFINED_OBJECT),
-                                                errmsg("tablespace \"%s\" does not exist",
-                                                               parseTree->intoTableSpaceName)));
-               } else
-               {
-                       tablespaceId = GetDefaultTablespace();
-                       /* note InvalidOid is OK in this case */
-               }
-
-               /* Parse and validate any reloptions */
-               reloptions = transformRelOptions((Datum) 0,
-                                                                                parseTree->intoOptions,
-                                                                                true,
-                                                                                false);
-               (void) heap_reloptions(RELKIND_RELATION, reloptions, true);
-
-               /* Check permissions except when using the database's default */
-               if (OidIsValid(tablespaceId))
-               {
-                       AclResult       aclresult;
-
-                       aclresult = pg_tablespace_aclcheck(tablespaceId, GetUserId(),
-                                                                                          ACL_CREATE);
-
-                       if (aclresult != ACLCHECK_OK)
-                               aclcheck_error(aclresult, ACL_KIND_TABLESPACE,
-                                                          get_tablespace_name(tablespaceId));
-               }
-
-               /*
-                * have to copy tupType to get rid of constraints
-                */
-               tupdesc = CreateTupleDescCopy(tupType);
-
-               intoRelationId = heap_create_with_catalog(intoName,
-                                                                                                 namespaceId,
-                                                                                                 tablespaceId,
-                                                                                                 InvalidOid,
-                                                                                                 GetUserId(),
-                                                                                                 tupdesc,
-                                                                                                 RELKIND_RELATION,
-                                                                                                 false,
-                                                                                                 true,
-                                                                                                 0,
-                                                                                                 parseTree->intoOnCommit,
-                                                                                                 reloptions,
-                                                                                                 allowSystemTableMods);
-
-               FreeTupleDesc(tupdesc);
-
-               /*
-                * Advance command counter so that the newly-created relation's
-                * catalog tuples will be visible to heap_open.
-                */
-               CommandCounterIncrement();
-
-               /*
-                * If necessary, create a TOAST table for the into relation. Note that
-                * AlterTableCreateToastTable ends with CommandCounterIncrement(), so
-                * that the TOAST table will be visible for insertion.
-                */
-               AlterTableCreateToastTable(intoRelationId, true);
-
-               /*
-                * And open the constructed table for writing.
-                */
-               intoRelationDesc = heap_open(intoRelationId, AccessExclusiveLock);
-
-               /* use_wal off requires rd_targblock be initially invalid */
-               Assert(intoRelationDesc->rd_targblock == InvalidBlockNumber);
-
-               /*
-                * We can skip WAL-logging the insertions, unless PITR is in use.
-                *
-                * Note that for a non-temp INTO table, this is safe only because we
-                * know that the catalog changes above will have been WAL-logged, and
-                * so RecordTransactionCommit will think it needs to WAL-log the
-                * eventual transaction commit.  Else the commit might be lost, even
-                * though all the data is safely fsync'd ...
-                */
-               estate->es_into_relation_use_wal = XLogArchivingActive();
+                                       (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+                                        errmsg("cannot change sequence \"%s\"",
+                                                       RelationGetRelationName(resultRel))));
+                       break;
+               case RELKIND_TOASTVALUE:
+                       ereport(ERROR,
+                                       (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+                                        errmsg("cannot change TOAST relation \"%s\"",
+                                                       RelationGetRelationName(resultRel))));
+                       break;
+               case RELKIND_VIEW:
+                       switch (operation)
+                       {
+                               case CMD_INSERT:
+                                       if (!trigDesc || !trigDesc->trig_insert_instead_row)
+                                               ereport(ERROR,
+                                                 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+                                                  errmsg("cannot insert into view \"%s\"",
+                                                                 RelationGetRelationName(resultRel)),
+                                                  errhint("You need an unconditional ON INSERT DO INSTEAD rule or an INSTEAD OF INSERT trigger.")));
+                                       break;
+                               case CMD_UPDATE:
+                                       if (!trigDesc || !trigDesc->trig_update_instead_row)
+                                               ereport(ERROR,
+                                                 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+                                                  errmsg("cannot update view \"%s\"",
+                                                                 RelationGetRelationName(resultRel)),
+                                                  errhint("You need an unconditional ON UPDATE DO INSTEAD rule or an INSTEAD OF UPDATE trigger.")));
+                                       break;
+                               case CMD_DELETE:
+                                       if (!trigDesc || !trigDesc->trig_delete_instead_row)
+                                               ereport(ERROR,
+                                                 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+                                                  errmsg("cannot delete from view \"%s\"",
+                                                                 RelationGetRelationName(resultRel)),
+                                                  errhint("You need an unconditional ON DELETE DO INSTEAD rule or an INSTEAD OF DELETE trigger.")));
+                                       break;
+                               default:
+                                       elog(ERROR, "unrecognized CmdType: %d", (int) operation);
+                                       break;
+                       }
+                       break;
+               case RELKIND_FOREIGN_TABLE:
+                       ereport(ERROR,
+                                       (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+                                        errmsg("cannot change foreign table \"%s\"",
+                                                       RelationGetRelationName(resultRel))));
+                       break;
+               default:
+                       ereport(ERROR,
+                                       (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+                                        errmsg("cannot change relation \"%s\"",
+                                                       RelationGetRelationName(resultRel))));
+                       break;
        }
-
-       estate->es_into_relation_descriptor = intoRelationDesc;
-
-       queryDesc->tupDesc = tupType;
-       queryDesc->planstate = planstate;
 }
 
 /*
- * Initialize ResultRelInfo data for one result relation
+ * Check that a proposed rowmark target relation is a legal target
+ *
+ * In most cases parser and/or planner should have noticed this already, but
+ * they don't cover all cases.
  */
 static void
-initResultRelInfo(ResultRelInfo *resultRelInfo,
-                                 Index resultRelationIndex,
-                                 List *rangeTable,
-                                 CmdType operation,
-                                 bool doInstrument)
+CheckValidRowMarkRel(Relation rel, RowMarkType markType)
 {
-       Oid                     resultRelationOid;
-       Relation        resultRelationDesc;
-
-       resultRelationOid = getrelid(resultRelationIndex, rangeTable);
-       resultRelationDesc = heap_open(resultRelationOid, RowExclusiveLock);
-
-       switch (resultRelationDesc->rd_rel->relkind)
+       switch (rel->rd_rel->relkind)
        {
+               case RELKIND_RELATION:
+                       /* OK */
+                       break;
                case RELKIND_SEQUENCE:
+                       /* Must disallow this because we don't vacuum sequences */
                        ereport(ERROR,
                                        (errcode(ERRCODE_WRONG_OBJECT_TYPE),
-                                        errmsg("cannot change sequence \"%s\"",
-                                                       RelationGetRelationName(resultRelationDesc))));
+                                        errmsg("cannot lock rows in sequence \"%s\"",
+                                                       RelationGetRelationName(rel))));
                        break;
                case RELKIND_TOASTVALUE:
+                       /* We could allow this, but there seems no good reason to */
                        ereport(ERROR,
                                        (errcode(ERRCODE_WRONG_OBJECT_TYPE),
-                                        errmsg("cannot change TOAST relation \"%s\"",
-                                                       RelationGetRelationName(resultRelationDesc))));
+                                        errmsg("cannot lock rows in TOAST relation \"%s\"",
+                                                       RelationGetRelationName(rel))));
                        break;
                case RELKIND_VIEW:
+                       /* Should not get here */
+                       ereport(ERROR,
+                                       (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+                                        errmsg("cannot lock rows in view \"%s\"",
+                                                       RelationGetRelationName(rel))));
+                       break;
+               case RELKIND_FOREIGN_TABLE:
+                       /* Perhaps we can support this someday, but not today */
+                       ereport(ERROR,
+                                       (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+                                        errmsg("cannot lock rows in foreign table \"%s\"",
+                                                       RelationGetRelationName(rel))));
+                       break;
+               default:
                        ereport(ERROR,
                                        (errcode(ERRCODE_WRONG_OBJECT_TYPE),
-                                        errmsg("cannot change view \"%s\"",
-                                                       RelationGetRelationName(resultRelationDesc))));
+                                        errmsg("cannot lock rows in relation \"%s\"",
+                                                       RelationGetRelationName(rel))));
                        break;
        }
+}
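
A sketch of the caller side, for orientation: the rowmark setup loop in InitPlan is expected to open each runtime rowmark's relation and validate it with this check before building its ExecRowMark. The loop shape, the lock level, and the skipping of parent/COPY marks below are simplifications for illustration, not text from this patch.

    foreach(l, plannedstmt->rowMarks)
    {
        PlanRowMark *rc = (PlanRowMark *) lfirst(l);
        Relation    relation;

        /* parent-only entries and whole-row-copy marks have no table to lock */
        if (rc->isParent || rc->markType == ROW_MARK_COPY)
            continue;

        relation = heap_open(getrelid(rc->rti, rangeTable), RowShareLock);
        CheckValidRowMarkRel(relation, rc->markType);
        /* ... then fill in an ExecRowMark for this relation ... */
    }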
 
+/*
+ * Initialize ResultRelInfo data for one result relation
+ *
+ * Caution: before Postgres 9.1, this function included the relkind checking
+ * that's now in CheckValidResultRel, and it also did ExecOpenIndices if
+ * appropriate.  Be sure callers cover those needs.
+ */
+void
+InitResultRelInfo(ResultRelInfo *resultRelInfo,
+                                 Relation resultRelationDesc,
+                                 Index resultRelationIndex,
+                                 int instrument_options)
+{
        MemSet(resultRelInfo, 0, sizeof(ResultRelInfo));
        resultRelInfo->type = T_ResultRelInfo;
        resultRelInfo->ri_RangeTableIndex = resultRelationIndex;
@@ -904,28 +1127,93 @@ initResultRelInfo(ResultRelInfo *resultRelInfo,
 
                resultRelInfo->ri_TrigFunctions = (FmgrInfo *)
                        palloc0(n * sizeof(FmgrInfo));
-               if (doInstrument)
-                       resultRelInfo->ri_TrigInstrument = InstrAlloc(n);
-               else
-                       resultRelInfo->ri_TrigInstrument = NULL;
+               resultRelInfo->ri_TrigWhenExprs = (List **)
+                       palloc0(n * sizeof(List *));
+               if (instrument_options)
+                       resultRelInfo->ri_TrigInstrument = InstrAlloc(n, instrument_options);
        }
        else
        {
                resultRelInfo->ri_TrigFunctions = NULL;
+               resultRelInfo->ri_TrigWhenExprs = NULL;
                resultRelInfo->ri_TrigInstrument = NULL;
        }
        resultRelInfo->ri_ConstraintExprs = NULL;
        resultRelInfo->ri_junkFilter = NULL;
+       resultRelInfo->ri_projectReturning = NULL;
+}
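
Given the caution above, the relkind check and index opening are now the callers' responsibility. A sketch of the combined pattern follows; in the patched tree these steps are split between InitPlan and the ModifyTable setup code, and the variable names here are illustrative only.

    resultRelationOid = getrelid(resultRelationIndex, rangeTable);
    resultRelation = heap_open(resultRelationOid, RowExclusiveLock);
    CheckValidResultRel(resultRelation, operation);

    InitResultRelInfo(resultRelInfo,
                      resultRelation,
                      resultRelationIndex,
                      estate->es_instrument);

    /* as in the removed code below, DELETE never needs new index entries */
    if (resultRelation->rd_rel->relhasindex &&
        operation != CMD_DELETE)
        ExecOpenIndices(resultRelInfo);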
+
+/*
+ *             ExecGetTriggerResultRel
+ *
+ * Get a ResultRelInfo for a trigger target relation.  Most of the time,
+ * triggers are fired on one of the result relations of the query, and so
+ * we can just return a member of the es_result_relations array.  (Note: in
+ * self-join situations there might be multiple members with the same OID;
+ * if so it doesn't matter which one we pick.)  However, it is sometimes
+ * necessary to fire triggers on other relations; this happens mainly when an
+ * RI update trigger queues additional triggers on other relations, which will
+ * be processed in the context of the outer query.     For efficiency's sake,
+ * we want to have a ResultRelInfo for those triggers too; that can avoid
+ * repeated re-opening of the relation.  (It also provides a way for EXPLAIN
+ * ANALYZE to report the runtimes of such triggers.)  So we make additional
+ * ResultRelInfo's as needed, and save them in es_trig_target_relations.
+ */
+ResultRelInfo *
+ExecGetTriggerResultRel(EState *estate, Oid relid)
+{
+       ResultRelInfo *rInfo;
+       int                     nr;
+       ListCell   *l;
+       Relation        rel;
+       MemoryContext oldcontext;
+
+       /* First, search through the query result relations */
+       rInfo = estate->es_result_relations;
+       nr = estate->es_num_result_relations;
+       while (nr > 0)
+       {
+               if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
+                       return rInfo;
+               rInfo++;
+               nr--;
+       }
+       /* Nope, but maybe we already made an extra ResultRelInfo for it */
+       foreach(l, estate->es_trig_target_relations)
+       {
+               rInfo = (ResultRelInfo *) lfirst(l);
+               if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
+                       return rInfo;
+       }
+       /* Nope, so we need a new one */
+
+       /*
+        * Open the target relation's relcache entry.  We assume that an
+        * appropriate lock is still held by the backend from whenever the trigger
+        * event got queued, so we need take no new lock here.  Also, we need not
+        * recheck the relkind, so no need for CheckValidResultRel.
+        */
+       rel = heap_open(relid, NoLock);
+
+       /*
+        * Make the new entry in the right context.
+        */
+       oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
+       rInfo = makeNode(ResultRelInfo);
+       InitResultRelInfo(rInfo,
+                                         rel,
+                                         0,            /* dummy rangetable index */
+                                         estate->es_instrument);
+       estate->es_trig_target_relations =
+               lappend(estate->es_trig_target_relations, rInfo);
+       MemoryContextSwitchTo(oldcontext);
 
        /*
-        * If there are indices on the result relation, open them and save
-        * descriptors in the result relation info, so that we can add new index
-        * entries for the tuples we add/update.  We need not do this for a
-        * DELETE, however, since deletion doesn't affect indexes.
+        * Currently, we don't need any index information in ResultRelInfos used
+        * only for triggers, so no need to call ExecOpenIndices.
         */
-       if (resultRelationDesc->rd_rel->relhasindex &&
-               operation != CMD_DELETE)
-               ExecOpenIndices(resultRelInfo);
+
+       return rInfo;
 }
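
Usage is a straight lookup; a minimal sketch, with trigRelOid standing in for the OID recorded with the queued trigger event:

    ResultRelInfo *rInfo = ExecGetTriggerResultRel(estate, trigRelOid);
    Relation    rel = rInfo->ri_RelationDesc;
    TriggerDesc *trigdesc = rInfo->ri_TrigDesc;
    Instrumentation *instr = rInfo->ri_TrigInstrument;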
 
 /*
@@ -948,11 +1236,13 @@ initResultRelInfo(ResultRelInfo *resultRelInfo,
  * recognize how far down the requirement really goes, but for now we just
  * make all plan nodes do the same thing if the top level forces the choice.
  *
- * We assume that estate->es_result_relation_info is already set up to
- * describe the target relation.  Note that in an UPDATE that spans an
- * inheritance tree, some of the target relations may have OIDs and some not.
- * We have to make the decisions on a per-relation basis as we initialize
- * each of the child plans of the topmost Append plan.
+ * We assume that if we are generating tuples for INSERT or UPDATE,
+ * estate->es_result_relation_info is already set up to describe the target
+ * relation.  Note that in an UPDATE that spans an inheritance tree, some of
+ * the target relations may have OIDs and some not.  We have to make the
+ * decisions on a per-relation basis as we initialize each of the subplans of
+ * the ModifyTable node, so ModifyTable has to set es_result_relation_info
+ * while initializing each subplan.
  *
  * SELECT INTO is even uglier, because we don't have the INTO relation's
  * descriptor available when this code runs; we have to look aside at a
@@ -961,28 +1251,66 @@ initResultRelInfo(ResultRelInfo *resultRelInfo,
 bool
 ExecContextForcesOids(PlanState *planstate, bool *hasoids)
 {
+       ResultRelInfo *ri = planstate->state->es_result_relation_info;
+
+       if (ri != NULL)
+       {
+               Relation        rel = ri->ri_RelationDesc;
+
+               if (rel != NULL)
+               {
+                       *hasoids = rel->rd_rel->relhasoids;
+                       return true;
+               }
+       }
+
        if (planstate->state->es_select_into)
        {
                *hasoids = planstate->state->es_into_oids;
                return true;
        }
-       else
+
+       return false;
+}
+
+/* ----------------------------------------------------------------
+ *             ExecPostprocessPlan
+ *
+ *             Give plan nodes a final chance to execute before shutdown
+ * ----------------------------------------------------------------
+ */
+static void
+ExecPostprocessPlan(EState *estate)
+{
+       ListCell   *lc;
+
+       /*
+        * Make sure nodes run forward.
+        */
+       estate->es_direction = ForwardScanDirection;
+
+       /*
+        * Run any secondary ModifyTable nodes to completion, in case the main
+        * query did not fetch all rows from them.      (We do this to ensure that
+        * such nodes have predictable results.)
+        */
+       foreach(lc, estate->es_auxmodifytables)
        {
-               ResultRelInfo *ri = planstate->state->es_result_relation_info;
+               PlanState  *ps = (PlanState *) lfirst(lc);
 
-               if (ri != NULL)
+               for (;;)
                {
-                       Relation        rel = ri->ri_RelationDesc;
+                       TupleTableSlot *slot;
 
-                       if (rel != NULL)
-                       {
-                               *hasoids = rel->rd_rel->relhasoids;
-                               return true;
-                       }
+                       /* Reset the per-output-tuple exprcontext each time */
+                       ResetPerTupleExprContext(estate);
+
+                       slot = ExecProcNode(ps);
+
+                       if (TupIsNull(slot))
+                               break;
                }
        }
-
-       return false;
 }
 
 /* ----------------------------------------------------------------
@@ -997,7 +1325,7 @@ ExecContextForcesOids(PlanState *planstate, bool *hasoids)
  * tuple tables must be cleared or dropped to ensure pins are released.
  * ----------------------------------------------------------------
  */
-void
+static void
 ExecEndPlan(PlanState *planstate, EState *estate)
 {
        ResultRelInfo *resultRelInfo;
@@ -1005,21 +1333,27 @@ ExecEndPlan(PlanState *planstate, EState *estate)
        ListCell   *l;
 
        /*
-        * shut down any PlanQual processing we were doing
+        * shut down the node-type-specific query processing
         */
-       if (estate->es_evalPlanQual != NULL)
-               EndEvalPlanQual(estate);
+       ExecEndNode(planstate);
 
        /*
-        * shut down the node-type-specific query processing
+        * for subplans too
         */
-       ExecEndNode(planstate);
+       foreach(l, estate->es_subplanstates)
+       {
+               PlanState  *subplanstate = (PlanState *) lfirst(l);
+
+               ExecEndNode(subplanstate);
+       }
 
        /*
-        * destroy the executor "tuple" table.
+        * destroy the executor's tuple table.  Actually we only care about
+        * releasing buffer pins and tupdesc refcounts; there's no need to pfree
+        * the TupleTableSlots, since the containing memory context is about to go
+        * away anyway.
         */
-       ExecDropTupleTable(estate->es_tupleTable, true);
-       estate->es_tupleTable = NULL;
+       ExecResetTupleTable(estate->es_tupleTable, false);
 
        /*
         * close the result relation(s) if any, but hold locks until xact commit.
@@ -1034,25 +1368,14 @@ ExecEndPlan(PlanState *planstate, EState *estate)
        }
 
        /*
-        * close the "into" relation if necessary, again keeping lock
+        * likewise close any trigger target relations
         */
-       if (estate->es_into_relation_descriptor != NULL)
+       foreach(l, estate->es_trig_target_relations)
        {
-               /*
-                * If we skipped using WAL, and it's not a temp relation, we must
-                * force the relation down to disk before it's safe to commit the
-                * transaction.  This requires forcing out any dirty buffers and then
-                * doing a forced fsync.
-                */
-               if (!estate->es_into_relation_use_wal &&
-                       !estate->es_into_relation_descriptor->rd_istemp)
-               {
-                       FlushRelationBuffers(estate->es_into_relation_descriptor);
-                       /* FlushRelationBuffers will have opened rd_smgr */
-                       smgrimmedsync(estate->es_into_relation_descriptor->rd_smgr);
-               }
-
-               heap_close(estate->es_into_relation_descriptor, NoLock);
+               resultRelInfo = (ResultRelInfo *) lfirst(l);
+               /* Close indices and then the relation itself */
+               ExecCloseIndices(resultRelInfo);
+               heap_close(resultRelInfo->ri_RelationDesc, NoLock);
        }
 
        /*
@@ -1060,48 +1383,41 @@ ExecEndPlan(PlanState *planstate, EState *estate)
         */
        foreach(l, estate->es_rowMarks)
        {
-               ExecRowMark *erm = lfirst(l);
+               ExecRowMark *erm = (ExecRowMark *) lfirst(l);
 
-               heap_close(erm->relation, NoLock);
+               if (erm->relation)
+                       heap_close(erm->relation, NoLock);
        }
 }
 
 /* ----------------------------------------------------------------
  *             ExecutePlan
  *
- *             processes the query plan to retrieve 'numberTuples' tuples in the
- *             direction specified.
+ *             Processes the query plan until we have processed 'numberTuples' tuples,
+ *             moving in the specified direction.
  *
- *             Retrieves all tuples if numberTuples is 0
- *
- *             result is either a slot containing the last tuple in the case
- *             of a SELECT or NULL otherwise.
+ *             Runs to completion if numberTuples is 0
  *
  * Note: the ctid attribute is a 'junk' attribute that is removed before the
  * user can see it
  * ----------------------------------------------------------------
  */
-static TupleTableSlot *
+static void
 ExecutePlan(EState *estate,
                        PlanState *planstate,
                        CmdType operation,
+                       bool sendTuples,
                        long numberTuples,
                        ScanDirection direction,
                        DestReceiver *dest)
 {
-       JunkFilter *junkfilter;
        TupleTableSlot *slot;
-       ItemPointer tupleid = NULL;
-       ItemPointerData tuple_ctid;
        long            current_tuple_count;
-       TupleTableSlot *result;
 
        /*
         * initialize local variables
         */
-       slot = NULL;
        current_tuple_count = 0;
-       result = NULL;
 
        /*
         * Set the direction.
@@ -1109,28 +1425,8 @@ ExecutePlan(EState *estate,
        estate->es_direction = direction;
 
        /*
-        * Process BEFORE EACH STATEMENT triggers
-        */
-       switch (operation)
-       {
-               case CMD_UPDATE:
-                       ExecBSUpdateTriggers(estate, estate->es_result_relation_info);
-                       break;
-               case CMD_DELETE:
-                       ExecBSDeleteTriggers(estate, estate->es_result_relation_info);
-                       break;
-               case CMD_INSERT:
-                       ExecBSInsertTriggers(estate, estate->es_result_relation_info);
-                       break;
-               default:
-                       /* do nothing */
-                       break;
-       }
-
-       /*
         * Loop until we've processed the proper number of tuples from the plan.
         */
-
        for (;;)
        {
                /* Reset the per-output-tuple exprcontext */
@@ -1139,192 +1435,40 @@ ExecutePlan(EState *estate,
                /*
                 * Execute the plan and obtain a tuple
                 */
-lnext: ;
-               if (estate->es_useEvalPlan)
-               {
-                       slot = EvalPlanQualNext(estate);
-                       if (TupIsNull(slot))
-                               slot = ExecProcNode(planstate);
-               }
-               else
-                       slot = ExecProcNode(planstate);
+               slot = ExecProcNode(planstate);
 
                /*
                 * if the tuple is null, then we assume there is nothing more to
-                * process so we just return null...
+                * process so we just end the loop...
                 */
                if (TupIsNull(slot))
-               {
-                       result = NULL;
                        break;
-               }
 
                /*
-                * if we have a junk filter, then project a new tuple with the junk
+                * If we have a junk filter, then project a new tuple with the junk
                 * removed.
                 *
                 * Store this new "clean" tuple in the junkfilter's resultSlot.
                 * (Formerly, we stored it back over the "dirty" tuple, which is WRONG
                 * because that tuple slot has the wrong descriptor.)
-                *
-                * Also, extract all the junk information we need.
                 */
-               if ((junkfilter = estate->es_junkFilter) != NULL)
-               {
-                       Datum           datum;
-                       bool            isNull;
-
-                       /*
-                        * extract the 'ctid' junk attribute.
-                        */
-                       if (operation == CMD_UPDATE || operation == CMD_DELETE)
-                       {
-                               if (!ExecGetJunkAttribute(junkfilter,
-                                                                                 slot,
-                                                                                 "ctid",
-                                                                                 &datum,
-                                                                                 &isNull))
-                                       elog(ERROR, "could not find junk ctid column");
-
-                               /* shouldn't ever get a null result... */
-                               if (isNull)
-                                       elog(ERROR, "ctid is NULL");
-
-                               tupleid = (ItemPointer) DatumGetPointer(datum);
-                               tuple_ctid = *tupleid;  /* make sure we don't free the ctid!! */
-                               tupleid = &tuple_ctid;
-                       }
-
-                       /*
-                        * Process any FOR UPDATE or FOR SHARE locking requested.
-                        */
-                       else if (estate->es_rowMarks != NIL)
-                       {
-                               ListCell   *l;
-
-               lmark:  ;
-                               foreach(l, estate->es_rowMarks)
-                               {
-                                       ExecRowMark *erm = lfirst(l);
-                                       HeapTupleData tuple;
-                                       Buffer          buffer;
-                                       ItemPointerData update_ctid;
-                                       TransactionId update_xmax;
-                                       TupleTableSlot *newSlot;
-                                       LockTupleMode lockmode;
-                                       HTSU_Result test;
-
-                                       if (!ExecGetJunkAttribute(junkfilter,
-                                                                                         slot,
-                                                                                         erm->resname,
-                                                                                         &datum,
-                                                                                         &isNull))
-                                               elog(ERROR, "could not find junk \"%s\" column",
-                                                        erm->resname);
-
-                                       /* shouldn't ever get a null result... */
-                                       if (isNull)
-                                               elog(ERROR, "\"%s\" is NULL", erm->resname);
-
-                                       tuple.t_self = *((ItemPointer) DatumGetPointer(datum));
-
-                                       if (erm->forUpdate)
-                                               lockmode = LockTupleExclusive;
-                                       else
-                                               lockmode = LockTupleShared;
-
-                                       test = heap_lock_tuple(erm->relation, &tuple, &buffer,
-                                                                                  &update_ctid, &update_xmax,
-                                                                                  estate->es_snapshot->curcid,
-                                                                                  lockmode, erm->noWait);
-                                       ReleaseBuffer(buffer);
-                                       switch (test)
-                                       {
-                                               case HeapTupleSelfUpdated:
-                                                       /* treat it as deleted; do not process */
-                                                       goto lnext;
-
-                                               case HeapTupleMayBeUpdated:
-                                                       break;
-
-                                               case HeapTupleUpdated:
-                                                       if (IsXactIsoLevelSerializable)
-                                                               ereport(ERROR,
-                                                                (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
-                                                                 errmsg("could not serialize access due to concurrent update")));
-                                                       if (!ItemPointerEquals(&update_ctid,
-                                                                                                  &tuple.t_self))
-                                                       {
-                                                               /* updated, so look at updated version */
-                                                               newSlot = EvalPlanQual(estate,
-                                                                                                          erm->rti,
-                                                                                                          &update_ctid,
-                                                                                                          update_xmax,
-                                                                                                          estate->es_snapshot->curcid);
-                                                               if (!TupIsNull(newSlot))
-                                                               {
-                                                                       slot = newSlot;
-                                                                       estate->es_useEvalPlan = true;
-                                                                       goto lmark;
-                                                               }
-                                                       }
-
-                                                       /*
-                                                        * if tuple was deleted or PlanQual failed for
-                                                        * updated tuple - we must not return this tuple!
-                                                        */
-                                                       goto lnext;
-
-                                               default:
-                                                       elog(ERROR, "unrecognized heap_lock_tuple status: %u",
-                                                                test);
-                                                       return NULL;
-                                       }
-                               }
-                       }
-
-                       /*
-                        * Finally create a new "clean" tuple with all junk attributes
-                        * removed
-                        */
-                       slot = ExecFilterJunk(junkfilter, slot);
-               }
+               if (estate->es_junkFilter != NULL)
+                       slot = ExecFilterJunk(estate->es_junkFilter, slot);
 
                /*
-                * now that we have a tuple, do the appropriate thing with it.. either
-                * return it to the user, add it to a relation someplace, delete it
-                * from a relation, or modify some of its attributes.
+                * If we are supposed to send the tuple somewhere, do so. (In
+                * practice, this is probably always the case at this point.)
                 */
-               switch (operation)
-               {
-                       case CMD_SELECT:
-                               ExecSelect(slot,        /* slot containing tuple */
-                                                  dest,        /* destination's tuple-receiver obj */
-                                                  estate);
-                               result = slot;
-                               break;
+               if (sendTuples)
+                       (*dest->receiveSlot) (slot, dest);
 
-                       case CMD_INSERT:
-                               ExecInsert(slot, tupleid, estate);
-                               result = NULL;
-                               break;
-
-                       case CMD_DELETE:
-                               ExecDelete(slot, tupleid, estate);
-                               result = NULL;
-                               break;
-
-                       case CMD_UPDATE:
-                               ExecUpdate(slot, tupleid, estate);
-                               result = NULL;
-                               break;
-
-                       default:
-                               elog(ERROR, "unrecognized operation code: %d",
-                                        (int) operation);
-                               result = NULL;
-                               break;
-               }
+               /*
+                * Count tuples processed, if this is a SELECT.  (For other operation
+                * types, the ModifyTable plan node must count the appropriate
+                * events.)
+                */
+               if (operation == CMD_SELECT)
+                       (estate->es_processed)++;
 
                /*
                 * check our tuple count.. if we've processed the proper number then
@@ -1335,465 +1479,47 @@ lnext: ;
                if (numberTuples && numberTuples == current_tuple_count)
                        break;
        }
-
-       /*
-        * Process AFTER EACH STATEMENT triggers
-        */
-       switch (operation)
-       {
-               case CMD_UPDATE:
-                       ExecASUpdateTriggers(estate, estate->es_result_relation_info);
-                       break;
-               case CMD_DELETE:
-                       ExecASDeleteTriggers(estate, estate->es_result_relation_info);
-                       break;
-               case CMD_INSERT:
-                       ExecASInsertTriggers(estate, estate->es_result_relation_info);
-                       break;
-               default:
-                       /* do nothing */
-                       break;
-       }
-
-       /*
-        * here, result is either a slot containing a tuple in the case of a
-        * SELECT or NULL otherwise.
-        */
-       return result;
 }
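
Since ExecutePlan no longer fires statement triggers or returns a slot, the interesting wiring is in its caller. A paraphrase of how standard_ExecutorRun is expected to drive it (count and direction come straight from the ExecutorRun arguments; this is a sketch, not the patch text):

    sendTuples = (operation == CMD_SELECT ||
                  queryDesc->plannedstmt->hasReturning);

    if (sendTuples)
        (*dest->rStartup) (dest, operation, queryDesc->tupDesc);

    if (!ScanDirectionIsNoMovement(direction))
        ExecutePlan(estate,
                    queryDesc->planstate,
                    operation,
                    sendTuples,
                    count,
                    direction,
                    dest);

    if (sendTuples)
        (*dest->rShutdown) (dest);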
 
-/* ----------------------------------------------------------------
- *             ExecSelect
- *
- *             SELECTs are easy.. we just pass the tuple to the appropriate
- *             print function.  The only complexity is when we do a
- *             "SELECT INTO", in which case we insert the tuple into
- *             the appropriate relation (note: this is a newly created relation
- *             so we don't need to worry about indices or locks.)
- * ----------------------------------------------------------------
+
+/*
+ * ExecRelCheck --- check that tuple meets constraints for result relation
  */
-static void
-ExecSelect(TupleTableSlot *slot,
-                  DestReceiver *dest,
-                  EState *estate)
+static const char *
+ExecRelCheck(ResultRelInfo *resultRelInfo,
+                        TupleTableSlot *slot, EState *estate)
 {
+       Relation        rel = resultRelInfo->ri_RelationDesc;
+       int                     ncheck = rel->rd_att->constr->num_check;
+       ConstrCheck *check = rel->rd_att->constr->check;
+       ExprContext *econtext;
+       MemoryContext oldContext;
+       List       *qual;
+       int                     i;
+
        /*
-        * insert the tuple into the "into relation"
-        *
-        * XXX this probably ought to be replaced by a separate destination
+        * If first time through for this result relation, build expression
+        * nodetrees for rel's constraint expressions.  Keep them in the per-query
+        * memory context so they'll survive throughout the query.
         */
-       if (estate->es_into_relation_descriptor != NULL)
+       if (resultRelInfo->ri_ConstraintExprs == NULL)
        {
-               HeapTuple       tuple;
-
-               tuple = ExecCopySlotTuple(slot);
-               heap_insert(estate->es_into_relation_descriptor, tuple,
-                                       estate->es_snapshot->curcid,
-                                       estate->es_into_relation_use_wal,
-                                       false);         /* never any point in using FSM */
-               /* we know there are no indexes to update */
-               heap_freetuple(tuple);
-               IncrAppended();
+               oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
+               resultRelInfo->ri_ConstraintExprs =
+                       (List **) palloc(ncheck * sizeof(List *));
+               for (i = 0; i < ncheck; i++)
+               {
+                       /* ExecQual wants implicit-AND form */
+                       qual = make_ands_implicit(stringToNode(check[i].ccbin));
+                       resultRelInfo->ri_ConstraintExprs[i] = (List *)
+                               ExecPrepareExpr((Expr *) qual, estate);
+               }
+               MemoryContextSwitchTo(oldContext);
        }
 
        /*
-        * send the tuple to the destination
-        */
-       (*dest->receiveSlot) (slot, dest);
-       IncrRetrieved();
-       (estate->es_processed)++;
-}
-
-/* ----------------------------------------------------------------
- *             ExecInsert
- *
- *             INSERTs are trickier.. we have to insert the tuple into
- *             the base relation and insert appropriate tuples into the
- *             index relations.
- * ----------------------------------------------------------------
- */
-static void
-ExecInsert(TupleTableSlot *slot,
-                  ItemPointer tupleid,
-                  EState *estate)
-{
-       HeapTuple       tuple;
-       ResultRelInfo *resultRelInfo;
-       Relation        resultRelationDesc;
-       Oid                     newId;
-
-       /*
-        * get the heap tuple out of the tuple table slot, making sure we have a
-        * writable copy
-        */
-       tuple = ExecMaterializeSlot(slot);
-
-       /*
-        * get information on the (current) result relation
-        */
-       resultRelInfo = estate->es_result_relation_info;
-       resultRelationDesc = resultRelInfo->ri_RelationDesc;
-
-       /* BEFORE ROW INSERT Triggers */
-       if (resultRelInfo->ri_TrigDesc &&
-               resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_INSERT] > 0)
-       {
-               HeapTuple       newtuple;
-
-               newtuple = ExecBRInsertTriggers(estate, resultRelInfo, tuple);
-
-               if (newtuple == NULL)   /* "do nothing" */
-                       return;
-
-               if (newtuple != tuple)  /* modified by Trigger(s) */
-               {
-                       /*
-                        * Put the modified tuple into a slot for convenience of routines
-                        * below.  We assume the tuple was allocated in per-tuple memory
-                        * context, and therefore will go away by itself. The tuple table
-                        * slot should not try to clear it.
-                        */
-                       TupleTableSlot *newslot = estate->es_trig_tuple_slot;
-
-                       if (newslot->tts_tupleDescriptor != slot->tts_tupleDescriptor)
-                               ExecSetSlotDescriptor(newslot, slot->tts_tupleDescriptor);
-                       ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
-                       slot = newslot;
-                       tuple = newtuple;
-               }
-       }
-
-       /*
-        * Check the constraints of the tuple
-        */
-       if (resultRelationDesc->rd_att->constr)
-               ExecConstraints(resultRelInfo, slot, estate);
-
-       /*
-        * insert the tuple
-        *
-        * Note: heap_insert returns the tid (location) of the new tuple in the
-        * t_self field.
-        */
-       newId = heap_insert(resultRelationDesc, tuple,
-                                               estate->es_snapshot->curcid,
-                                               true, true);
-
-       IncrAppended();
-       (estate->es_processed)++;
-       estate->es_lastoid = newId;
-       setLastTid(&(tuple->t_self));
-
-       /*
-        * insert index entries for tuple
-        */
-       if (resultRelInfo->ri_NumIndices > 0)
-               ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);
-
-       /* AFTER ROW INSERT Triggers */
-       ExecARInsertTriggers(estate, resultRelInfo, tuple);
-}
-
-/* ----------------------------------------------------------------
- *             ExecDelete
- *
- *             DELETE is like UPDATE, except that we delete the tuple and no
- *             index modifications are needed
- * ----------------------------------------------------------------
- */
-static void
-ExecDelete(TupleTableSlot *slot,
-                  ItemPointer tupleid,
-                  EState *estate)
-{
-       ResultRelInfo *resultRelInfo;
-       Relation        resultRelationDesc;
-       HTSU_Result result;
-       ItemPointerData update_ctid;
-       TransactionId update_xmax;
-
-       /*
-        * get information on the (current) result relation
-        */
-       resultRelInfo = estate->es_result_relation_info;
-       resultRelationDesc = resultRelInfo->ri_RelationDesc;
-
-       /* BEFORE ROW DELETE Triggers */
-       if (resultRelInfo->ri_TrigDesc &&
-               resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_DELETE] > 0)
-       {
-               bool            dodelete;
-
-               dodelete = ExecBRDeleteTriggers(estate, resultRelInfo, tupleid,
-                                                                               estate->es_snapshot->curcid);
-
-               if (!dodelete)                  /* "do nothing" */
-                       return;
-       }
-
-       /*
-        * delete the tuple
-        *
-        * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
-        * the row to be deleted is visible to that snapshot, and throw a can't-
-        * serialize error if not.      This is a special-case behavior needed for
-        * referential integrity updates in serializable transactions.
-        */
-ldelete:;
-       result = heap_delete(resultRelationDesc, tupleid,
-                                                &update_ctid, &update_xmax,
-                                                estate->es_snapshot->curcid,
-                                                estate->es_crosscheck_snapshot,
-                                                true /* wait for commit */ );
-       switch (result)
-       {
-               case HeapTupleSelfUpdated:
-                       /* already deleted by self; nothing to do */
-                       return;
-
-               case HeapTupleMayBeUpdated:
-                       break;
-
-               case HeapTupleUpdated:
-                       if (IsXactIsoLevelSerializable)
-                               ereport(ERROR,
-                                               (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
-                                                errmsg("could not serialize access due to concurrent update")));
-                       else if (!ItemPointerEquals(tupleid, &update_ctid))
-                       {
-                               TupleTableSlot *epqslot;
-
-                               epqslot = EvalPlanQual(estate,
-                                                                          resultRelInfo->ri_RangeTableIndex,
-                                                                          &update_ctid,
-                                                                          update_xmax,
-                                                                          estate->es_snapshot->curcid);
-                               if (!TupIsNull(epqslot))
-                               {
-                                       *tupleid = update_ctid;
-                                       goto ldelete;
-                               }
-                       }
-                       /* tuple already deleted; nothing to do */
-                       return;
-
-               default:
-                       elog(ERROR, "unrecognized heap_delete status: %u", result);
-                       return;
-       }
-
-       IncrDeleted();
-       (estate->es_processed)++;
-
-       /*
-        * Note: Normally one would think that we have to delete index tuples
-        * associated with the heap tuple now...
-        *
-        * ... but in POSTGRES, we have no need to do this because VACUUM will
-        * take care of it later.  We can't delete index tuples immediately
-        * anyway, since the tuple is still visible to other transactions.
-        */
-
-       /* AFTER ROW DELETE Triggers */
-       ExecARDeleteTriggers(estate, resultRelInfo, tupleid);
-}
-
-/* ----------------------------------------------------------------
- *             ExecUpdate
- *
- *             note: we can't run UPDATE queries with transactions
- *             off because UPDATEs are actually INSERTs and our
- *             scan will mistakenly loop forever, updating the tuple
- *             it just inserted..      This should be fixed but until it
- *             is, we don't want to get stuck in an infinite loop
- *             which corrupts your database..
- * ----------------------------------------------------------------
- */
-static void
-ExecUpdate(TupleTableSlot *slot,
-                  ItemPointer tupleid,
-                  EState *estate)
-{
-       HeapTuple       tuple;
-       ResultRelInfo *resultRelInfo;
-       Relation        resultRelationDesc;
-       HTSU_Result result;
-       ItemPointerData update_ctid;
-       TransactionId update_xmax;
-
-       /*
-        * abort the operation if not running transactions
-        */
-       if (IsBootstrapProcessingMode())
-               elog(ERROR, "cannot UPDATE during bootstrap");
-
-       /*
-        * get the heap tuple out of the tuple table slot, making sure we have a
-        * writable copy
-        */
-       tuple = ExecMaterializeSlot(slot);
-
-       /*
-        * get information on the (current) result relation
-        */
-       resultRelInfo = estate->es_result_relation_info;
-       resultRelationDesc = resultRelInfo->ri_RelationDesc;
-
-       /* BEFORE ROW UPDATE Triggers */
-       if (resultRelInfo->ri_TrigDesc &&
-               resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_UPDATE] > 0)
-       {
-               HeapTuple       newtuple;
-
-               newtuple = ExecBRUpdateTriggers(estate, resultRelInfo,
-                                                                               tupleid, tuple,
-                                                                               estate->es_snapshot->curcid);
-
-               if (newtuple == NULL)   /* "do nothing" */
-                       return;
-
-               if (newtuple != tuple)  /* modified by Trigger(s) */
-               {
-                       /*
-                        * Put the modified tuple into a slot for convenience of routines
-                        * below.  We assume the tuple was allocated in per-tuple memory
-                        * context, and therefore will go away by itself. The tuple table
-                        * slot should not try to clear it.
-                        */
-                       TupleTableSlot *newslot = estate->es_trig_tuple_slot;
-
-                       if (newslot->tts_tupleDescriptor != slot->tts_tupleDescriptor)
-                               ExecSetSlotDescriptor(newslot, slot->tts_tupleDescriptor);
-                       ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
-                       slot = newslot;
-                       tuple = newtuple;
-               }
-       }
-
-       /*
-        * Check the constraints of the tuple
-        *
-        * If we generate a new candidate tuple after EvalPlanQual testing, we
-        * must loop back here and recheck constraints.  (We don't need to redo
-        * triggers, however.  If there are any BEFORE triggers then trigger.c
-        * will have done heap_lock_tuple to lock the correct tuple, so there's no
-        * need to do them again.)
-        */
-lreplace:;
-       if (resultRelationDesc->rd_att->constr)
-               ExecConstraints(resultRelInfo, slot, estate);
-
-       /*
-        * replace the heap tuple
-        *
-        * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
-        * the row to be updated is visible to that snapshot, and throw a can't-
-        * serialize error if not.      This is a special-case behavior needed for
-        * referential integrity updates in serializable transactions.
-        */
-       result = heap_update(resultRelationDesc, tupleid, tuple,
-                                                &update_ctid, &update_xmax,
-                                                estate->es_snapshot->curcid,
-                                                estate->es_crosscheck_snapshot,
-                                                true /* wait for commit */ );
-       switch (result)
-       {
-               case HeapTupleSelfUpdated:
-                       /* already deleted by self; nothing to do */
-                       return;
-
-               case HeapTupleMayBeUpdated:
-                       break;
-
-               case HeapTupleUpdated:
-                       if (IsXactIsoLevelSerializable)
-                               ereport(ERROR,
-                                               (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
-                                                errmsg("could not serialize access due to concurrent update")));
-                       else if (!ItemPointerEquals(tupleid, &update_ctid))
-                       {
-                               TupleTableSlot *epqslot;
-
-                               epqslot = EvalPlanQual(estate,
-                                                                          resultRelInfo->ri_RangeTableIndex,
-                                                                          &update_ctid,
-                                                                          update_xmax,
-                                                                          estate->es_snapshot->curcid);
-                               if (!TupIsNull(epqslot))
-                               {
-                                       *tupleid = update_ctid;
-                                       slot = ExecFilterJunk(estate->es_junkFilter, epqslot);
-                                       tuple = ExecMaterializeSlot(slot);
-                                       goto lreplace;
-                               }
-                       }
-                       /* tuple already deleted; nothing to do */
-                       return;
-
-               default:
-                       elog(ERROR, "unrecognized heap_update status: %u", result);
-                       return;
-       }
-
-       IncrReplaced();
-       (estate->es_processed)++;
-
-       /*
-        * Note: instead of having to update the old index tuples associated with
-        * the heap tuple, all we do is form and insert new index tuples. This is
-        * because UPDATEs are actually DELETEs and INSERTs, and index tuple
-        * deletion is done later by VACUUM (see notes in ExecDelete).  All we do
-        * here is insert new index tuples.  -cim 9/27/89
-        */
-
-       /*
-        * insert index entries for tuple
-        *
-        * Note: heap_update returns the tid (location) of the new tuple in the
-        * t_self field.
-        */
-       if (resultRelInfo->ri_NumIndices > 0)
-               ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);
-
-       /* AFTER ROW UPDATE Triggers */
-       ExecARUpdateTriggers(estate, resultRelInfo, tupleid, tuple);
-}
-
-static const char *
-ExecRelCheck(ResultRelInfo *resultRelInfo,
-                        TupleTableSlot *slot, EState *estate)
-{
-       Relation        rel = resultRelInfo->ri_RelationDesc;
-       int                     ncheck = rel->rd_att->constr->num_check;
-       ConstrCheck *check = rel->rd_att->constr->check;
-       ExprContext *econtext;
-       MemoryContext oldContext;
-       List       *qual;
-       int                     i;
-
-       /*
-        * If first time through for this result relation, build expression
-        * nodetrees for rel's constraint expressions.  Keep them in the per-query
-        * memory context so they'll survive throughout the query.
-        */
-       if (resultRelInfo->ri_ConstraintExprs == NULL)
-       {
-               oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
-               resultRelInfo->ri_ConstraintExprs =
-                       (List **) palloc(ncheck * sizeof(List *));
-               for (i = 0; i < ncheck; i++)
-               {
-                       /* ExecQual wants implicit-AND form */
-                       qual = make_ands_implicit(stringToNode(check[i].ccbin));
-                       resultRelInfo->ri_ConstraintExprs[i] = (List *)
-                               ExecPrepareExpr((Expr *) qual, estate);
-               }
-               MemoryContextSwitchTo(oldContext);
-       }
-
-       /*
-        * We will use the EState's per-tuple context for evaluating constraint
-        * expressions (creating it if it's not already there).
+        * We will use the EState's per-tuple context for evaluating constraint
+        * expressions (creating it if it's not already there).
         */
        econtext = GetPerTupleExprContext(estate);
 
@@ -1855,17 +1581,96 @@ ExecConstraints(ResultRelInfo *resultRelInfo,
        }
 }
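
The call pattern is unchanged even though the write paths have left this file; mirroring the removed ExecInsert/ExecUpdate code above, a result-relation write is expected to do:

    /* check NOT NULL and CHECK constraints before writing the tuple */
    if (resultRelationDesc->rd_att->constr)
        ExecConstraints(resultRelInfo, slot, estate);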
 
+
 /*
- * Check a modified tuple to see if we want to process its updated version
- * under READ COMMITTED rules.
+ * ExecFindRowMark -- find the ExecRowMark struct for given rangetable index
+ */
+ExecRowMark *
+ExecFindRowMark(EState *estate, Index rti)
+{
+       ListCell   *lc;
+
+       foreach(lc, estate->es_rowMarks)
+       {
+               ExecRowMark *erm = (ExecRowMark *) lfirst(lc);
+
+               if (erm->rti == rti)
+                       return erm;
+       }
+       elog(ERROR, "failed to find ExecRowMark for rangetable index %u", rti);
+       return NULL;                            /* keep compiler quiet */
+}
+
+/*
+ * ExecBuildAuxRowMark -- create an ExecAuxRowMark struct
+ *
+ * Inputs are the underlying ExecRowMark struct and the targetlist of the
+ * input plan node (not planstate node!).  We need the latter to find out
+ * the column numbers of the resjunk columns.
+ */
+ExecAuxRowMark *
+ExecBuildAuxRowMark(ExecRowMark *erm, List *targetlist)
+{
+       ExecAuxRowMark *aerm = (ExecAuxRowMark *) palloc0(sizeof(ExecAuxRowMark));
+       char            resname[32];
+
+       aerm->rowmark = erm;
+
+       /* Look up the resjunk columns associated with this rowmark */
+       if (erm->relation)
+       {
+               Assert(erm->markType != ROW_MARK_COPY);
+
+               /* if child rel, need tableoid */
+               if (erm->rti != erm->prti)
+               {
+                       snprintf(resname, sizeof(resname), "tableoid%u", erm->rowmarkId);
+                       aerm->toidAttNo = ExecFindJunkAttributeInTlist(targetlist,
+                                                                                                                  resname);
+                       if (!AttributeNumberIsValid(aerm->toidAttNo))
+                               elog(ERROR, "could not find junk %s column", resname);
+               }
+
+               /* always need ctid for real relations */
+               snprintf(resname, sizeof(resname), "ctid%u", erm->rowmarkId);
+               aerm->ctidAttNo = ExecFindJunkAttributeInTlist(targetlist,
+                                                                                                          resname);
+               if (!AttributeNumberIsValid(aerm->ctidAttNo))
+                       elog(ERROR, "could not find junk %s column", resname);
+       }
+       else
+       {
+               Assert(erm->markType == ROW_MARK_COPY);
+
+               snprintf(resname, sizeof(resname), "wholerow%u", erm->rowmarkId);
+               aerm->wholeAttNo = ExecFindJunkAttributeInTlist(targetlist,
+                                                                                                               resname);
+               if (!AttributeNumberIsValid(aerm->wholeAttNo))
+                       elog(ERROR, "could not find junk %s column", resname);
+       }
+
+       return aerm;
+}
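
A hedged sketch (not part of this patch) of how a plan node's ExecInit routine could pair ExecFindRowMark with ExecBuildAuxRowMark; a LockRows-style node does essentially this for each non-parent PlanRowMark. The helper name and parameters below are placeholders, and backend headers are assumed.

#include "postgres.h"
#include "executor/executor.h"
#include "nodes/plannodes.h"

static List *
build_aux_rowmarks(EState *estate, List *planRowMarks, List *inputTargetList)
{
	List	   *aux = NIL;
	ListCell   *lc;

	foreach(lc, planRowMarks)
	{
		PlanRowMark *rc = (PlanRowMark *) lfirst(lc);
		ExecRowMark *erm;
		ExecAuxRowMark *aerm;

		/* parent rowmarks are only planner bookkeeping; skip them */
		if (rc->isParent)
			continue;

		/* find the ExecRowMark that InitPlan registered for this RT index */
		erm = ExecFindRowMark(estate, rc->rti);

		/* record the resjunk ctid/tableoid/wholerow column numbers */
		aerm = ExecBuildAuxRowMark(erm, inputTargetList);

		aux = lappend(aux, aerm);
	}

	return aux;
}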
+
+
+/*
+ * EvalPlanQual logic --- recheck modified tuple(s) to see if we want to
+ * process the updated version under READ COMMITTED rules.
  *
  * See backend/executor/README for some info about how this works.
+ */
+
+
+/*
+ * Check a modified tuple to see if we want to process its updated version
+ * under READ COMMITTED rules.
  *
- *     estate - executor state data
+ *     estate - outer executor state data
+ *     epqstate - state for EvalPlanQual rechecking
+ *     relation - table containing tuple
  *     rti - rangetable index of table containing tuple
  *     *tid - t_ctid from the outdated tuple (ie, next updated version)
  *     priorXmax - t_xmax from the outdated tuple
- *     curCid - command ID of current command of my transaction
  *
  * *tid is also an output parameter: it's modified to hold the TID of the
  * latest version of the tuple (note this may be changed even on failure)
@@ -1874,53 +1679,113 @@ ExecConstraints(ResultRelInfo *resultRelInfo,
  * NULL if we determine we shouldn't process the row.
  */
 TupleTableSlot *
-EvalPlanQual(EState *estate, Index rti,
-                        ItemPointer tid, TransactionId priorXmax, CommandId curCid)
+EvalPlanQual(EState *estate, EPQState *epqstate,
+                        Relation relation, Index rti,
+                        ItemPointer tid, TransactionId priorXmax)
 {
-       evalPlanQual *epq;
-       EState     *epqstate;
-       Relation        relation;
-       HeapTupleData tuple;
-       HeapTuple       copyTuple = NULL;
-       bool            endNode;
+       TupleTableSlot *slot;
+       HeapTuple       copyTuple;
 
-       Assert(rti != 0);
+       Assert(rti > 0);
 
        /*
-        * find relation containing target tuple
+        * Get and lock the updated version of the row; if fail, return NULL.
         */
-       if (estate->es_result_relation_info != NULL &&
-               estate->es_result_relation_info->ri_RangeTableIndex == rti)
-               relation = estate->es_result_relation_info->ri_RelationDesc;
-       else
-       {
-               ListCell   *l;
+       copyTuple = EvalPlanQualFetch(estate, relation, LockTupleExclusive,
+                                                                 tid, priorXmax);
 
-               relation = NULL;
-               foreach(l, estate->es_rowMarks)
-               {
-                       if (((ExecRowMark *) lfirst(l))->rti == rti)
-                       {
-                               relation = ((ExecRowMark *) lfirst(l))->relation;
-                               break;
-                       }
-               }
-               if (relation == NULL)
-                       elog(ERROR, "could not find RowMark for RT index %u", rti);
-       }
+       if (copyTuple == NULL)
+               return NULL;
+
+       /*
+        * For UPDATE/DELETE we have to return the tid of the actual row we're
+        * executing EPQ for.
+        */
+       *tid = copyTuple->t_self;
+
+       /*
+        * Need to run a recheck subquery.      Initialize or reinitialize EPQ state.
+        */
+       EvalPlanQualBegin(epqstate, estate);
+
+       /*
+        * Free old test tuple, if any, and store new tuple where relation's scan
+        * node will see it
+        */
+       EvalPlanQualSetTuple(epqstate, rti, copyTuple);
+
+       /*
+        * Fetch any non-locked source rows
+        */
+       EvalPlanQualFetchRowMarks(epqstate);
+
+       /*
+        * Run the EPQ query.  We assume it will return at most one tuple.
+        */
+       slot = EvalPlanQualNext(epqstate);
+
+       /*
+        * If we got a tuple, force the slot to materialize the tuple so that it
+        * is not dependent on any local state in the EPQ query (in particular,
+        * it is likely that the slot still holds references to pass-by-ref
+        * datums present in copyTuple).  As with the next step, this is to
+        * guard against early re-use of the EPQ query.
+        */
+       if (!TupIsNull(slot))
+               (void) ExecMaterializeSlot(slot);
 
        /*
-        * fetch tid tuple
+        * Clear out the test tuple.  This is needed in case the EPQ query is
+        * re-used to test a tuple for a different relation.  (Not clear that can
+        * really happen, but let's be safe.)
+        */
+       EvalPlanQualSetTuple(epqstate, rti, NULL);
+
+       return slot;
+}
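
For orientation, a hedged caller-side sketch (not part of this patch) of the pattern this function is meant for: after heap_update or heap_delete reports HeapTupleUpdated under READ COMMITTED, recheck the newest row version through EvalPlanQual and retry the operation only if a slot comes back. The helper name and locals are placeholders; nodeModifyTable.c is the real caller.

#include "postgres.h"
#include "access/xact.h"
#include "executor/executor.h"
#include "storage/itemptr.h"

static bool
recheck_after_concurrent_update(EState *estate, EPQState *epqstate,
								ResultRelInfo *resultRelInfo,
								ItemPointer tupleid,		/* in/out: row to redo */
								ItemPointer update_ctid,	/* from heap_update */
								TransactionId update_xmax)	/* from heap_update */
{
	TupleTableSlot *epqslot;

	/* under a transaction-snapshot isolation level there is nothing to recheck */
	if (IsolationUsesXactSnapshot())
		ereport(ERROR,
				(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
				 errmsg("could not serialize access due to concurrent update")));

	/* t_ctid pointing at the tuple itself means it was deleted, not updated */
	if (ItemPointerEquals(tupleid, update_ctid))
		return false;

	epqslot = EvalPlanQual(estate,
						   epqstate,
						   resultRelInfo->ri_RelationDesc,
						   resultRelInfo->ri_RangeTableIndex,
						   update_ctid,
						   update_xmax);
	if (TupIsNull(epqslot))
		return false;			/* new version no longer satisfies the quals */

	/* caller should loop back and redo its UPDATE/DELETE on this TID */
	*tupleid = *update_ctid;
	return true;
}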
+
+/*
+ * Fetch a copy of the newest version of an outdated tuple
+ *
+ *     estate - executor state data
+ *     relation - table containing tuple
+ *     lockmode - requested tuple lock mode
+ *     *tid - t_ctid from the outdated tuple (ie, next updated version)
+ *     priorXmax - t_xmax from the outdated tuple
+ *
+ * Returns a palloc'd copy of the newest tuple version, or NULL if we find
+ * that there is no newest version (ie, the row was deleted not updated).
+ * If successful, we have locked the newest tuple version, so caller does not
+ * need to worry about it changing anymore.
+ *
+ * Note: properly, lockmode should be declared as enum LockTupleMode,
+ * but we use "int" to avoid having to include heapam.h in executor.h.
+ */
+HeapTuple
+EvalPlanQualFetch(EState *estate, Relation relation, int lockmode,
+                                 ItemPointer tid, TransactionId priorXmax)
+{
+       HeapTuple       copyTuple = NULL;
+       HeapTupleData tuple;
+       SnapshotData SnapshotDirty;
+
+       /*
+        * fetch target tuple
         *
         * Loop here to deal with updated or busy tuples
         */
+       InitDirtySnapshot(SnapshotDirty);
        tuple.t_self = *tid;
        for (;;)
        {
                Buffer          buffer;
 
-               if (heap_fetch(relation, SnapshotDirty, &tuple, &buffer, true, NULL))
+               if (heap_fetch(relation, &SnapshotDirty, &tuple, &buffer, true, NULL))
                {
+                       HTSU_Result test;
+                       ItemPointerData update_ctid;
+                       TransactionId update_xmax;
+
                        /*
                         * If xmin isn't what we're expecting, the slot must have been
                         * recycled and reused for an unrelated tuple.  This implies that
@@ -1937,386 +1802,837 @@ EvalPlanQual(EState *estate, Index rti,
                        }
 
                        /* otherwise xmin should not be dirty... */
-                       if (TransactionIdIsValid(SnapshotDirty->xmin))
+                       if (TransactionIdIsValid(SnapshotDirty.xmin))
                                elog(ERROR, "t_xmin is uncommitted in tuple to be updated");
 
                        /*
                         * If tuple is being updated by other transaction then we have to
                         * wait for its commit/abort.
                         */
-                       if (TransactionIdIsValid(SnapshotDirty->xmax))
+                       if (TransactionIdIsValid(SnapshotDirty.xmax))
                        {
                                ReleaseBuffer(buffer);
-                               XactLockTableWait(SnapshotDirty->xmax);
+                               XactLockTableWait(SnapshotDirty.xmax);
                                continue;               /* loop back to repeat heap_fetch */
                        }
 
                        /*
                         * If tuple was inserted by our own transaction, we have to check
-                        * cmin against curCid: cmin >= curCid means our command cannot
-                        * see the tuple, so we should ignore it.  Without this we are
-                        * open to the "Halloween problem" of indefinitely re-updating
-                        * the same tuple.  (We need not check cmax because
-                        * HeapTupleSatisfiesDirty will consider a tuple deleted by
-                        * our transaction dead, regardless of cmax.)  We just checked
-                        * that priorXmax == xmin, so we can test that variable instead
-                        * of doing HeapTupleHeaderGetXmin again.
+                        * cmin against es_output_cid: cmin >= current CID means our
+                        * command cannot see the tuple, so we should ignore it.  Without
+                        * this we are open to the "Halloween problem" of indefinitely
+                        * re-updating the same tuple. (We need not check cmax because
+                        * HeapTupleSatisfiesDirty will consider a tuple deleted by our
+                        * transaction dead, regardless of cmax.)  We just checked that
+                        * priorXmax == xmin, so we can test that variable instead of
+                        * doing HeapTupleHeaderGetXmin again.
                         */
                        if (TransactionIdIsCurrentTransactionId(priorXmax) &&
-                               HeapTupleHeaderGetCmin(tuple.t_data) >= curCid)
+                               HeapTupleHeaderGetCmin(tuple.t_data) >= estate->es_output_cid)
                        {
                                ReleaseBuffer(buffer);
                                return NULL;
                        }
 
-                       /*
-                        * We got tuple - now copy it for use by recheck query.
-                        */
-                       copyTuple = heap_copytuple(&tuple);
+                       /*
+                        * This is a live tuple, so now try to lock it.
+                        */
+                       test = heap_lock_tuple(relation, &tuple, &buffer,
+                                                                  &update_ctid, &update_xmax,
+                                                                  estate->es_output_cid,
+                                                                  lockmode, false);
+                       /* We now have two pins on the buffer, get rid of one */
+                       ReleaseBuffer(buffer);
+
+                       switch (test)
+                       {
+                               case HeapTupleSelfUpdated:
+                                       /* treat it as deleted; do not process */
+                                       ReleaseBuffer(buffer);
+                                       return NULL;
+
+                               case HeapTupleMayBeUpdated:
+                                       /* successfully locked */
+                                       break;
+
+                               case HeapTupleUpdated:
+                                       ReleaseBuffer(buffer);
+                                       if (IsolationUsesXactSnapshot())
+                                               ereport(ERROR,
+                                                               (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
+                                                                errmsg("could not serialize access due to concurrent update")));
+                                       if (!ItemPointerEquals(&update_ctid, &tuple.t_self))
+                                       {
+                                               /* it was updated, so look at the updated version */
+                                               tuple.t_self = update_ctid;
+                                               /* updated row should have xmin matching this xmax */
+                                               priorXmax = update_xmax;
+                                               continue;
+                                       }
+                                       /* tuple was deleted, so give up */
+                                       return NULL;
+
+                               default:
+                                       ReleaseBuffer(buffer);
+                                       elog(ERROR, "unrecognized heap_lock_tuple status: %u",
+                                                test);
+                                       return NULL;    /* keep compiler quiet */
+                       }
+
+                       /*
+                        * We got tuple - now copy it for use by recheck query.
+                        */
+                       copyTuple = heap_copytuple(&tuple);
+                       ReleaseBuffer(buffer);
+                       break;
+               }
+
+               /*
+                * If the referenced slot was actually empty, the latest version of
+                * the row must have been deleted, so we need do nothing.
+                */
+               if (tuple.t_data == NULL)
+               {
+                       ReleaseBuffer(buffer);
+                       return NULL;
+               }
+
+               /*
+                * As above, if xmin isn't what we're expecting, do nothing.
+                */
+               if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
+                                                                priorXmax))
+               {
+                       ReleaseBuffer(buffer);
+                       return NULL;
+               }
+
+               /*
+                * If we get here, the tuple was found but failed SnapshotDirty.
+                * Assuming the xmin is either a committed xact or our own xact (as it
+                * certainly should be if we're trying to modify the tuple), this must
+                * mean that the row was updated or deleted by either a committed xact
+                * or our own xact.  If it was deleted, we can ignore it; if it was
+                * updated then chain up to the next version and repeat the whole
+                * process.
+                *
+                * As above, it should be safe to examine xmax and t_ctid without the
+                * buffer content lock, because they can't be changing.
+                */
+               if (ItemPointerEquals(&tuple.t_self, &tuple.t_data->t_ctid))
+               {
+                       /* deleted, so forget about it */
+                       ReleaseBuffer(buffer);
+                       return NULL;
+               }
+
+               /* updated, so look at the updated row */
+               tuple.t_self = tuple.t_data->t_ctid;
+               /* updated row should have xmin matching this xmax */
+               priorXmax = HeapTupleHeaderGetXmax(tuple.t_data);
+               ReleaseBuffer(buffer);
+               /* loop back to fetch next in chain */
+       }
+
+       /*
+        * Return the copied tuple
+        */
+       return copyTuple;
+}
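
EvalPlanQualFetch is exported for callers other than EvalPlanQual itself; a hedged sketch (not part of this patch) of roughly how a LockRows-style node would follow the update chain after heap_lock_tuple reports HeapTupleUpdated. The helper name and parameters are placeholders.

#include "postgres.h"
#include "access/heapam.h"		/* for LockTupleExclusive */
#include "executor/executor.h"

static bool
follow_update_chain(EState *estate, EPQState *epqstate, ExecRowMark *erm,
					ItemPointer update_ctid, TransactionId update_xmax)
{
	HeapTuple	copyTuple;

	/* chase t_ctid links to the newest committed version and lock it */
	copyTuple = EvalPlanQualFetch(estate, erm->relation, LockTupleExclusive,
								  update_ctid, update_xmax);
	if (copyTuple == NULL)
		return false;			/* row is gone (deleted); nothing to recheck */

	/* install it as the EPQ test tuple for this relation's scan node */
	EvalPlanQualSetTuple(epqstate, erm->rti, copyTuple);
	return true;
}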
+
+/*
+ * EvalPlanQualInit -- initialize during creation of a plan state node
+ * that might need to invoke EPQ processing.
+ *
+ * Note: subplan/auxrowmarks can be NULL/NIL if they will be set later
+ * with EvalPlanQualSetPlan.
+ */
+void
+EvalPlanQualInit(EPQState *epqstate, EState *estate,
+                                Plan *subplan, List *auxrowmarks, int epqParam)
+{
+       /* Mark the EPQ state inactive */
+       epqstate->estate = NULL;
+       epqstate->planstate = NULL;
+       epqstate->origslot = NULL;
+       /* ... and remember data that EvalPlanQualBegin will need */
+       epqstate->plan = subplan;
+       epqstate->arowMarks = auxrowmarks;
+       epqstate->epqParam = epqParam;
+}
+
+/*
+ * EvalPlanQualSetPlan -- set or change subplan of an EPQState.
+ *
+ * We need this so that ModifyTable can deal with multiple subplans.
+ */
+void
+EvalPlanQualSetPlan(EPQState *epqstate, Plan *subplan, List *auxrowmarks)
+{
+       /* If we have a live EPQ query, shut it down */
+       EvalPlanQualEnd(epqstate);
+       /* And set/change the plan pointer */
+       epqstate->plan = subplan;
+       /* The rowmarks depend on the plan, too */
+       epqstate->arowMarks = auxrowmarks;
+}
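
A hedged sketch (not part of this patch) of the intended lifecycle: a node that may need EPQ rechecks calls EvalPlanQualInit once in its ExecInit routine, may later repoint the subplan with EvalPlanQualSetPlan (as ModifyTable does when switching among its subplans), and calls EvalPlanQualEnd from its ExecEnd routine. The node type and function names below are placeholders.

#include "postgres.h"
#include "executor/executor.h"
#include "nodes/execnodes.h"

typedef struct MyEpqUserState
{
	PlanState	ps;				/* standard PlanState header */
	EPQState	epqstate;		/* EvalPlanQual support */
} MyEpqUserState;

static void
my_node_init(MyEpqUserState *node, EState *estate,
			 Plan *subplan, List *auxrowmarks, int epqParam)
{
	/* cheap: just records the plan and rowmarks; no child EState yet */
	EvalPlanQualInit(&node->epqstate, estate, subplan, auxrowmarks, epqParam);
}

static void
my_node_switch_subplan(MyEpqUserState *node, Plan *subplan, List *auxrowmarks)
{
	/* shuts down any live EPQ query before repointing the plan */
	EvalPlanQualSetPlan(&node->epqstate, subplan, auxrowmarks);
}

static void
my_node_end(MyEpqUserState *node)
{
	/* tears down the child EState, if EvalPlanQualBegin ever built one */
	EvalPlanQualEnd(&node->epqstate);
}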
+
+/*
+ * Install one test tuple into EPQ state, or clear test tuple if tuple == NULL
+ *
+ * NB: passed tuple must be palloc'd; it may get freed later
+ */
+void
+EvalPlanQualSetTuple(EPQState *epqstate, Index rti, HeapTuple tuple)
+{
+       EState     *estate = epqstate->estate;
+
+       Assert(rti > 0);
+
+       /*
+        * free old test tuple, if any, and store new tuple where relation's scan
+        * node will see it
+        */
+       if (estate->es_epqTuple[rti - 1] != NULL)
+               heap_freetuple(estate->es_epqTuple[rti - 1]);
+       estate->es_epqTuple[rti - 1] = tuple;
+       estate->es_epqTupleSet[rti - 1] = true;
+}
+
+/*
+ * Fetch back the current test tuple (if any) for the specified RTI
+ */
+HeapTuple
+EvalPlanQualGetTuple(EPQState *epqstate, Index rti)
+{
+       EState     *estate = epqstate->estate;
+
+       Assert(rti > 0);
+
+       return estate->es_epqTuple[rti - 1];
+}
+
+/*
+ * Fetch the current row values for any non-locked relations that need
+ * to be scanned by an EvalPlanQual operation. origslot must have been set
+ * to contain the current result row (top-level row) that we need to recheck.
+ */
+void
+EvalPlanQualFetchRowMarks(EPQState *epqstate)
+{
+       ListCell   *l;
+
+       Assert(epqstate->origslot != NULL);
+
+       foreach(l, epqstate->arowMarks)
+       {
+               ExecAuxRowMark *aerm = (ExecAuxRowMark *) lfirst(l);
+               ExecRowMark *erm = aerm->rowmark;
+               Datum           datum;
+               bool            isNull;
+               HeapTupleData tuple;
+
+               if (RowMarkRequiresRowShareLock(erm->markType))
+                       elog(ERROR, "EvalPlanQual doesn't support locking rowmarks");
+
+               /* clear any leftover test tuple for this rel */
+               EvalPlanQualSetTuple(epqstate, erm->rti, NULL);
+
+               if (erm->relation)
+               {
+                       Buffer          buffer;
+
+                       Assert(erm->markType == ROW_MARK_REFERENCE);
+
+                       /* if child rel, must check whether it produced this row */
+                       if (erm->rti != erm->prti)
+                       {
+                               Oid                     tableoid;
+
+                               datum = ExecGetJunkAttribute(epqstate->origslot,
+                                                                                        aerm->toidAttNo,
+                                                                                        &isNull);
+                               /* non-locked rels could be on the inside of outer joins */
+                               if (isNull)
+                                       continue;
+                               tableoid = DatumGetObjectId(datum);
+
+                               if (tableoid != RelationGetRelid(erm->relation))
+                               {
+                                       /* this child is inactive right now */
+                                       continue;
+                               }
+                       }
+
+                       /* fetch the tuple's ctid */
+                       datum = ExecGetJunkAttribute(epqstate->origslot,
+                                                                                aerm->ctidAttNo,
+                                                                                &isNull);
+                       /* non-locked rels could be on the inside of outer joins */
+                       if (isNull)
+                               continue;
+                       tuple.t_self = *((ItemPointer) DatumGetPointer(datum));
+
+                       /* okay, fetch the tuple */
+                       if (!heap_fetch(erm->relation, SnapshotAny, &tuple, &buffer,
+                                                       false, NULL))
+                               elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");
+
+                       /* successful, copy and store tuple */
+                       EvalPlanQualSetTuple(epqstate, erm->rti,
+                                                                heap_copytuple(&tuple));
                        ReleaseBuffer(buffer);
-                       break;
                }
-
-               /*
-                * If the referenced slot was actually empty, the latest version of
-                * the row must have been deleted, so we need do nothing.
-                */
-               if (tuple.t_data == NULL)
+               else
                {
-                       ReleaseBuffer(buffer);
-                       return NULL;
+                       HeapTupleHeader td;
+
+                       Assert(erm->markType == ROW_MARK_COPY);
+
+                       /* fetch the whole-row Var for the relation */
+                       datum = ExecGetJunkAttribute(epqstate->origslot,
+                                                                                aerm->wholeAttNo,
+                                                                                &isNull);
+                       /* non-locked rels could be on the inside of outer joins */
+                       if (isNull)
+                               continue;
+                       td = DatumGetHeapTupleHeader(datum);
+
+                       /* build a temporary HeapTuple control structure */
+                       tuple.t_len = HeapTupleHeaderGetDatumLength(td);
+                       ItemPointerSetInvalid(&(tuple.t_self));
+                       tuple.t_tableOid = InvalidOid;
+                       tuple.t_data = td;
+
+                       /* copy and store tuple */
+                       EvalPlanQualSetTuple(epqstate, erm->rti,
+                                                                heap_copytuple(&tuple));
                }
+       }
+}
+
+/*
+ * Fetch the next row (if any) from EvalPlanQual testing
+ *
+ * (In practice, there should never be more than one row...)
+ */
+TupleTableSlot *
+EvalPlanQualNext(EPQState *epqstate)
+{
+       MemoryContext oldcontext;
+       TupleTableSlot *slot;
+
+       oldcontext = MemoryContextSwitchTo(epqstate->estate->es_query_cxt);
+       slot = ExecProcNode(epqstate->planstate);
+       MemoryContextSwitchTo(oldcontext);
+
+       return slot;
+}
+
+/*
+ * Initialize or reset an EvalPlanQual state tree
+ */
+void
+EvalPlanQualBegin(EPQState *epqstate, EState *parentestate)
+{
+       EState     *estate = epqstate->estate;
 
+       if (estate == NULL)
+       {
+               /* First time through, so create a child EState */
+               EvalPlanQualStart(epqstate, parentestate, epqstate->plan);
+       }
+       else
+       {
                /*
-                * As above, if xmin isn't what we're expecting, do nothing.
+                * We already have a suitable child EPQ tree, so just reset it.
                 */
-               if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
-                                                                priorXmax))
+               int                     rtsize = list_length(parentestate->es_range_table);
+               PlanState  *planstate = epqstate->planstate;
+
+               MemSet(estate->es_epqScanDone, 0, rtsize * sizeof(bool));
+
+               /* Recopy current values of parent parameters */
+               if (parentestate->es_plannedstmt->nParamExec > 0)
                {
-                       ReleaseBuffer(buffer);
-                       return NULL;
+                       int                     i = parentestate->es_plannedstmt->nParamExec;
+
+                       while (--i >= 0)
+                       {
+                               /* copy value if any, but not execPlan link */
+                               estate->es_param_exec_vals[i].value =
+                                       parentestate->es_param_exec_vals[i].value;
+                               estate->es_param_exec_vals[i].isnull =
+                                       parentestate->es_param_exec_vals[i].isnull;
+                       }
                }
 
                /*
-                * If we get here, the tuple was found but failed SnapshotDirty.
-                * Assuming the xmin is either a committed xact or our own xact (as it
-                * certainly should be if we're trying to modify the tuple), this must
-                * mean that the row was updated or deleted by either a committed xact
-                * or our own xact.  If it was deleted, we can ignore it; if it was
-                * updated then chain up to the next version and repeat the whole
-                * test.
-                *
-                * As above, it should be safe to examine xmax and t_ctid without the
-                * buffer content lock, because they can't be changing.
+                * Mark child plan tree as needing rescan at all scan nodes.  The
+                * first ExecProcNode will take care of actually doing the rescan.
                 */
-               if (ItemPointerEquals(&tuple.t_self, &tuple.t_data->t_ctid))
-               {
-                       /* deleted, so forget about it */
-                       ReleaseBuffer(buffer);
-                       return NULL;
-               }
-
-               /* updated, so look at the updated row */
-               tuple.t_self = tuple.t_data->t_ctid;
-               /* updated row should have xmin matching this xmax */
-               priorXmax = HeapTupleHeaderGetXmax(tuple.t_data);
-               ReleaseBuffer(buffer);
-               /* loop back to fetch next in chain */
+               planstate->chgParam = bms_add_member(planstate->chgParam,
+                                                                                        epqstate->epqParam);
        }
+}
 
-       /*
-        * For UPDATE/DELETE we have to return tid of actual row we're executing
-        * PQ for.
-        */
-       *tid = tuple.t_self;
+/*
+ * Start execution of an EvalPlanQual plan tree.
+ *
+ * This is a cut-down version of ExecutorStart(): we copy some state from
+ * the top-level estate rather than initializing it fresh.
+ */
+static void
+EvalPlanQualStart(EPQState *epqstate, EState *parentestate, Plan *planTree)
+{
+       EState     *estate;
+       int                     rtsize;
+       MemoryContext oldcontext;
+       ListCell   *l;
 
-       /*
-        * Need to run a recheck subquery.      Find or create a PQ stack entry.
-        */
-       epq = estate->es_evalPlanQual;
-       endNode = true;
+       rtsize = list_length(parentestate->es_range_table);
 
-       if (epq != NULL && epq->rti == 0)
-       {
-               /* Top PQ stack entry is idle, so re-use it */
-               Assert(!(estate->es_useEvalPlan) && epq->next == NULL);
-               epq->rti = rti;
-               endNode = false;
-       }
+       epqstate->estate = estate = CreateExecutorState();
 
-       /*
-        * If this is request for another RTE - Ra, - then we have to check wasn't
-        * PlanQual requested for Ra already and if so then Ra' row was updated
-        * again and we have to re-start old execution for Ra and forget all what
-        * we done after Ra was suspended. Cool? -:))
-        */
-       if (epq != NULL && epq->rti != rti &&
-               epq->estate->es_evTuple[rti - 1] != NULL)
-       {
-               do
-               {
-                       evalPlanQual *oldepq;
-
-                       /* stop execution */
-                       EvalPlanQualStop(epq);
-                       /* pop previous PlanQual from the stack */
-                       oldepq = epq->next;
-                       Assert(oldepq && oldepq->rti != 0);
-                       /* push current PQ to freePQ stack */
-                       oldepq->free = epq;
-                       epq = oldepq;
-                       estate->es_evalPlanQual = epq;
-               } while (epq->rti != rti);
-       }
+       oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
 
        /*
-        * If we are requested for another RTE then we have to suspend execution
-        * of current PlanQual and start execution for new one.
+        * Child EPQ EStates share the parent's copy of unchanging state such as
+        * the snapshot, rangetable, result-rel info, and external Param info.
+        * They need their own copies of local state, including a tuple table,
+        * es_param_exec_vals, etc.
         */
-       if (epq == NULL || epq->rti != rti)
+       estate->es_direction = ForwardScanDirection;
+       estate->es_snapshot = parentestate->es_snapshot;
+       estate->es_crosscheck_snapshot = parentestate->es_crosscheck_snapshot;
+       estate->es_range_table = parentestate->es_range_table;
+       estate->es_plannedstmt = parentestate->es_plannedstmt;
+       estate->es_junkFilter = parentestate->es_junkFilter;
+       estate->es_output_cid = parentestate->es_output_cid;
+       estate->es_result_relations = parentestate->es_result_relations;
+       estate->es_num_result_relations = parentestate->es_num_result_relations;
+       estate->es_result_relation_info = parentestate->es_result_relation_info;
+       /* es_trig_target_relations must NOT be copied */
+       estate->es_rowMarks = parentestate->es_rowMarks;
+       estate->es_top_eflags = parentestate->es_top_eflags;
+       estate->es_instrument = parentestate->es_instrument;
+       estate->es_select_into = parentestate->es_select_into;
+       estate->es_into_oids = parentestate->es_into_oids;
+       /* es_auxmodifytables must NOT be copied */
+
+       /*
+        * The external param list is simply shared from parent.  The internal
+        * param workspace has to be local state, but we copy the initial values
+        * from the parent, so as to have access to any param values that were
+        * already set from other parts of the parent's plan tree.
+        */
+       estate->es_param_list_info = parentestate->es_param_list_info;
+       if (parentestate->es_plannedstmt->nParamExec > 0)
        {
-               /* try to reuse plan used previously */
-               evalPlanQual *newepq = (epq != NULL) ? epq->free : NULL;
+               int                     i = parentestate->es_plannedstmt->nParamExec;
 
-               if (newepq == NULL)             /* first call or freePQ stack is empty */
-               {
-                       newepq = (evalPlanQual *) palloc0(sizeof(evalPlanQual));
-                       newepq->free = NULL;
-                       newepq->estate = NULL;
-                       newepq->planstate = NULL;
-               }
-               else
+               estate->es_param_exec_vals = (ParamExecData *)
+                       palloc0(i * sizeof(ParamExecData));
+               while (--i >= 0)
                {
-                       /* recycle previously used PlanQual */
-                       Assert(newepq->estate == NULL);
-                       epq->free = NULL;
+                       /* copy value if any, but not execPlan link */
+                       estate->es_param_exec_vals[i].value =
+                               parentestate->es_param_exec_vals[i].value;
+                       estate->es_param_exec_vals[i].isnull =
+                               parentestate->es_param_exec_vals[i].isnull;
                }
-               /* push current PQ to the stack */
-               newepq->next = epq;
-               epq = newepq;
-               estate->es_evalPlanQual = epq;
-               epq->rti = rti;
-               endNode = false;
        }
 
-       Assert(epq->rti == rti);
-
        /*
-        * Ok - we're requested for the same RTE.  Unfortunately we still have to
-        * end and restart execution of the plan, because ExecReScan wouldn't
-        * ensure that upper plan nodes would reset themselves.  We could make
-        * that work if insertion of the target tuple were integrated with the
-        * Param mechanism somehow, so that the upper plan nodes know that their
-        * children's outputs have changed.
-        *
-        * Note that the stack of free evalPlanQual nodes is quite useless at the
-        * moment, since it only saves us from pallocing/releasing the
-        * evalPlanQual nodes themselves.  But it will be useful once we implement
-        * ReScan instead of end/restart for re-using PlanQual nodes.
+        * Each EState must have its own es_epqScanDone state, but if we have
+        * nested EPQ checks they should share es_epqTuple arrays.      This allows
+        * sub-rechecks to inherit the values being examined by an outer recheck.
         */
-       if (endNode)
+       estate->es_epqScanDone = (bool *) palloc0(rtsize * sizeof(bool));
+       if (parentestate->es_epqTuple != NULL)
+       {
+               estate->es_epqTuple = parentestate->es_epqTuple;
+               estate->es_epqTupleSet = parentestate->es_epqTupleSet;
+       }
+       else
        {
-               /* stop execution */
-               EvalPlanQualStop(epq);
+               estate->es_epqTuple = (HeapTuple *)
+                       palloc0(rtsize * sizeof(HeapTuple));
+               estate->es_epqTupleSet = (bool *)
+                       palloc0(rtsize * sizeof(bool));
        }
 
        /*
-        * Initialize new recheck query.
-        *
-        * Note: if we were re-using PlanQual plans via ExecReScan, we'd need to
-        * instead copy down changeable state from the top plan (including
-        * es_result_relation_info, es_junkFilter) and reset locally changeable
-        * state in the epq (including es_param_exec_vals, es_evTupleNull).
+        * Each estate also has its own tuple table.
+        */
+       estate->es_tupleTable = NIL;
+
+       /*
+        * Initialize private state information for each SubPlan.  We must do this
+        * before running ExecInitNode on the main query tree, since
+        * ExecInitSubPlan expects to be able to find these entries. Some of the
+        * SubPlans might not be used in the part of the plan tree we intend to
+        * run, but since it's not easy to tell which, we just initialize them
+        * all.  (However, if the subplan is headed by a ModifyTable node, then it
+        * must be a data-modifying CTE, which we will certainly not need to
+        * re-run, so we can skip initializing it.      This is just an efficiency
+        * hack; it won't skip data-modifying CTEs for which the ModifyTable node
+        * is not at the top.)
         */
-       EvalPlanQualStart(epq, estate, epq->next);
+       Assert(estate->es_subplanstates == NIL);
+       foreach(l, parentestate->es_plannedstmt->subplans)
+       {
+               Plan       *subplan = (Plan *) lfirst(l);
+               PlanState  *subplanstate;
+
+               /* Don't initialize ModifyTable subplans, per comment above */
+               if (IsA(subplan, ModifyTable))
+                       subplanstate = NULL;
+               else
+                       subplanstate = ExecInitNode(subplan, estate, 0);
+
+               estate->es_subplanstates = lappend(estate->es_subplanstates,
+                                                                                  subplanstate);
+       }
 
        /*
-        * free old RTE' tuple, if any, and store target tuple where relation's
-        * scan node will see it
+        * Initialize the private state information for all the nodes in the part
+        * of the plan tree we need to run.  This opens files, allocates storage
+        * and leaves us ready to start processing tuples.
         */
-       epqstate = epq->estate;
-       if (epqstate->es_evTuple[rti - 1] != NULL)
-               heap_freetuple(epqstate->es_evTuple[rti - 1]);
-       epqstate->es_evTuple[rti - 1] = copyTuple;
+       epqstate->planstate = ExecInitNode(planTree, estate, 0);
 
-       return EvalPlanQualNext(estate);
+       MemoryContextSwitchTo(oldcontext);
 }
 
-static TupleTableSlot *
-EvalPlanQualNext(EState *estate)
+/*
+ * EvalPlanQualEnd -- shut down at termination of parent plan state node,
+ * or if we are done with the current EPQ child.
+ *
+ * This is a cut-down version of ExecutorEnd(); basically we want to do most
+ * of the normal cleanup, but *not* close result relations (which we are
+ * just sharing from the outer query). We do, however, have to close any
+ * trigger target relations that got opened, since those are not shared.
+ * (There probably shouldn't be any of the latter, but just in case...)
+ */
+void
+EvalPlanQualEnd(EPQState *epqstate)
 {
-       evalPlanQual *epq = estate->es_evalPlanQual;
+       EState     *estate = epqstate->estate;
        MemoryContext oldcontext;
-       TupleTableSlot *slot;
+       ListCell   *l;
 
-       Assert(epq->rti != 0);
+       if (estate == NULL)
+               return;                                 /* idle, so nothing to do */
 
-lpqnext:;
-       oldcontext = MemoryContextSwitchTo(epq->estate->es_query_cxt);
-       slot = ExecProcNode(epq->planstate);
-       MemoryContextSwitchTo(oldcontext);
+       oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
 
-       /*
-        * No more tuples for this PQ. Continue previous one.
-        */
-       if (TupIsNull(slot))
+       ExecEndNode(epqstate->planstate);
+
+       foreach(l, estate->es_subplanstates)
        {
-               evalPlanQual *oldepq;
+               PlanState  *subplanstate = (PlanState *) lfirst(l);
 
-               /* stop execution */
-               EvalPlanQualStop(epq);
-               /* pop old PQ from the stack */
-               oldepq = epq->next;
-               if (oldepq == NULL)
-               {
-                       /* this is the first (oldest) PQ - mark as free */
-                       epq->rti = 0;
-                       estate->es_useEvalPlan = false;
-                       /* and continue Query execution */
-                       return NULL;
-               }
-               Assert(oldepq->rti != 0);
-               /* push current PQ to freePQ stack */
-               oldepq->free = epq;
-               epq = oldepq;
-               estate->es_evalPlanQual = epq;
-               goto lpqnext;
+               ExecEndNode(subplanstate);
        }
 
-       return slot;
+       /* throw away the per-estate tuple table */
+       ExecResetTupleTable(estate->es_tupleTable, false);
+
+       /* close any trigger target relations attached to this EState */
+       foreach(l, estate->es_trig_target_relations)
+       {
+               ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l);
+
+               /* Close indices and then the relation itself */
+               ExecCloseIndices(resultRelInfo);
+               heap_close(resultRelInfo->ri_RelationDesc, NoLock);
+       }
+
+       MemoryContextSwitchTo(oldcontext);
+
+       FreeExecutorState(estate);
+
+       /* Mark EPQState idle */
+       epqstate->estate = NULL;
+       epqstate->planstate = NULL;
+       epqstate->origslot = NULL;
 }
 
+
+/*
+ * Support for SELECT INTO (a/k/a CREATE TABLE AS)
+ *
+ * We implement SELECT INTO by diverting SELECT's normal output with
+ * a specialized DestReceiver type.
+ */
+
+typedef struct
+{
+       DestReceiver pub;                       /* publicly-known function pointers */
+       EState     *estate;                     /* EState we are working with */
+       Relation        rel;                    /* Relation to write to */
+       int                     hi_options;             /* heap_insert performance options */
+       BulkInsertState bistate;        /* bulk insert state */
+} DR_intorel;
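
Since SELECT INTO works by swapping this receiver in for the normal one, here is a hedged sketch (not part of this patch) of the generic loop the executor runs against any DestReceiver; the function name and loop are placeholders for what ExecutePlan actually does.

#include "postgres.h"
#include "executor/executor.h"
#include "tcop/dest.h"

static void
drive_dest(PlanState *planstate, CmdType operation,
		   TupleDesc tupdesc, DestReceiver *dest)
{
	TupleTableSlot *slot;

	/* per-run setup; a no-op for the DR_intorel receiver defined above */
	(*dest->rStartup) (dest, (int) operation, tupdesc);

	for (;;)
	{
		slot = ExecProcNode(planstate);
		if (TupIsNull(slot))
			break;

		/* for SELECT INTO this ends up in intorel_receive -> heap_insert */
		(*dest->receiveSlot) (slot, dest);
	}

	(*dest->rShutdown) (dest);
}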
+
+/*
+ * OpenIntoRel --- actually create the SELECT INTO target relation
+ *
+ * This also replaces QueryDesc->dest with the special DestReceiver for
+ * SELECT INTO.  We assume that the correct result tuple type has already
+ * been placed in queryDesc->tupDesc.
+ */
 static void
-EndEvalPlanQual(EState *estate)
+OpenIntoRel(QueryDesc *queryDesc)
 {
-       evalPlanQual *epq = estate->es_evalPlanQual;
+       IntoClause *into = queryDesc->plannedstmt->intoClause;
+       EState     *estate = queryDesc->estate;
+       Relation        intoRelationDesc;
+       char       *intoName;
+       Oid                     namespaceId;
+       Oid                     tablespaceId;
+       Datum           reloptions;
+       Oid                     intoRelationId;
+       DR_intorel *myState;
+       static char *validnsps[] = HEAP_RELOPT_NAMESPACES;
+
+       Assert(into);
+
+       /*
+        * XXX This code needs to be kept in sync with DefineRelation(). Maybe we
+        * should try to use that function instead.
+        */
+
+       /*
+        * Check consistency of arguments
+        */
+       if (into->onCommit != ONCOMMIT_NOOP
+               && into->rel->relpersistence != RELPERSISTENCE_TEMP)
+               ereport(ERROR,
+                               (errcode(ERRCODE_INVALID_TABLE_DEFINITION),
+                                errmsg("ON COMMIT can only be used on temporary tables")));
 
-       if (epq->rti == 0)                      /* plans already shutdowned */
+       /*
+        * Find namespace to create in, check its permissions
+        */
+       intoName = into->rel->relname;
+       namespaceId = RangeVarGetAndCheckCreationNamespace(into->rel);
+       RangeVarAdjustRelationPersistence(into->rel, namespaceId);
+
+       /*
+        * Security check: disallow creating temp tables from security-restricted
+        * code.  This is needed because calling code might not expect untrusted
+        * tables to appear in pg_temp at the front of its search path.
+        */
+       if (into->rel->relpersistence == RELPERSISTENCE_TEMP
+               && InSecurityRestrictedOperation())
+               ereport(ERROR,
+                               (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+                                errmsg("cannot create temporary table within security-restricted operation")));
+
+       /*
+        * Select tablespace to use.  If not specified, use default tablespace
+        * (which may in turn default to database's default).
+        */
+       if (into->tableSpaceName)
        {
-               Assert(epq->next == NULL);
-               return;
+               tablespaceId = get_tablespace_oid(into->tableSpaceName, false);
+       }
+       else
+       {
+               tablespaceId = GetDefaultTablespace(into->rel->relpersistence);
+               /* note InvalidOid is OK in this case */
        }
 
-       for (;;)
+       /* Check permissions except when using the database's default space */
+       if (OidIsValid(tablespaceId) && tablespaceId != MyDatabaseTableSpace)
        {
-               evalPlanQual *oldepq;
+               AclResult       aclresult;
 
-               /* stop execution */
-               EvalPlanQualStop(epq);
-               /* pop old PQ from the stack */
-               oldepq = epq->next;
-               if (oldepq == NULL)
-               {
-                       /* this is the first (oldest) PQ - mark as free */
-                       epq->rti = 0;
-                       estate->es_useEvalPlan = false;
-                       break;
-               }
-               Assert(oldepq->rti != 0);
-               /* push current PQ to freePQ stack */
-               oldepq->free = epq;
-               epq = oldepq;
-               estate->es_evalPlanQual = epq;
+               aclresult = pg_tablespace_aclcheck(tablespaceId, GetUserId(),
+                                                                                  ACL_CREATE);
+
+               if (aclresult != ACLCHECK_OK)
+                       aclcheck_error(aclresult, ACL_KIND_TABLESPACE,
+                                                  get_tablespace_name(tablespaceId));
        }
+
+       /* Parse and validate any reloptions */
+       reloptions = transformRelOptions((Datum) 0,
+                                                                        into->options,
+                                                                        NULL,
+                                                                        validnsps,
+                                                                        true,
+                                                                        false);
+       (void) heap_reloptions(RELKIND_RELATION, reloptions, true);
+
+       /* Now we can actually create the new relation */
+       intoRelationId = heap_create_with_catalog(intoName,
+                                                                                         namespaceId,
+                                                                                         tablespaceId,
+                                                                                         InvalidOid,
+                                                                                         InvalidOid,
+                                                                                         InvalidOid,
+                                                                                         GetUserId(),
+                                                                                         queryDesc->tupDesc,
+                                                                                         NIL,
+                                                                                         RELKIND_RELATION,
+                                                                                         into->rel->relpersistence,
+                                                                                         false,
+                                                                                         false,
+                                                                                         true,
+                                                                                         0,
+                                                                                         into->onCommit,
+                                                                                         reloptions,
+                                                                                         true,
+                                                                                         allowSystemTableMods);
+       Assert(intoRelationId != InvalidOid);
+
+       /*
+        * Advance command counter so that the newly-created relation's catalog
+        * tuples will be visible to heap_open.
+        */
+       CommandCounterIncrement();
+
+       /*
+        * If necessary, create a TOAST table for the INTO relation. Note that
+        * AlterTableCreateToastTable ends with CommandCounterIncrement(), so that
+        * the TOAST table will be visible for insertion.
+        */
+       reloptions = transformRelOptions((Datum) 0,
+                                                                        into->options,
+                                                                        "toast",
+                                                                        validnsps,
+                                                                        true,
+                                                                        false);
+
+       (void) heap_reloptions(RELKIND_TOASTVALUE, reloptions, true);
+
+       AlterTableCreateToastTable(intoRelationId, reloptions);
+
+       /*
+        * And open the constructed table for writing.
+        */
+       intoRelationDesc = heap_open(intoRelationId, AccessExclusiveLock);
+
+       /*
+        * Now replace the query's DestReceiver with one for SELECT INTO
+        */
+       queryDesc->dest = CreateDestReceiver(DestIntoRel);
+       myState = (DR_intorel *) queryDesc->dest;
+       Assert(myState->pub.mydest == DestIntoRel);
+       myState->estate = estate;
+       myState->rel = intoRelationDesc;
+
+       /*
+        * We can skip WAL-logging the insertions, unless PITR or streaming
+        * replication is in use. We can skip the FSM in any case.
+        */
+       myState->hi_options = HEAP_INSERT_SKIP_FSM |
+               (XLogIsNeeded() ? 0 : HEAP_INSERT_SKIP_WAL);
+       myState->bistate = GetBulkInsertState();
+
+       /* Not using WAL requires smgr_targblock be initially invalid */
+       Assert(RelationGetTargetBlock(intoRelationDesc) == InvalidBlockNumber);
 }
 
 /*
- * Start execution of one level of PlanQual.
- *
- * This is a cut-down version of ExecutorStart(): we copy some state from
- * the top-level estate rather than initializing it fresh.
+ * CloseIntoRel --- clean up SELECT INTO at ExecutorEnd time
  */
 static void
-EvalPlanQualStart(evalPlanQual *epq, EState *estate, evalPlanQual *priorepq)
+CloseIntoRel(QueryDesc *queryDesc)
 {
-       EState     *epqstate;
-       int                     rtsize;
-       MemoryContext oldcontext;
+       DR_intorel *myState = (DR_intorel *) queryDesc->dest;
 
-       rtsize = list_length(estate->es_range_table);
+       /* OpenIntoRel might never have gotten called */
+       if (myState && myState->pub.mydest == DestIntoRel && myState->rel)
+       {
+               FreeBulkInsertState(myState->bistate);
 
-       epq->estate = epqstate = CreateExecutorState();
+               /* If we skipped using WAL, must heap_sync before commit */
+               if (myState->hi_options & HEAP_INSERT_SKIP_WAL)
+                       heap_sync(myState->rel);
 
-       oldcontext = MemoryContextSwitchTo(epqstate->es_query_cxt);
+               /* close rel, but keep lock until commit */
+               heap_close(myState->rel, NoLock);
 
-       /*
-        * The epqstates share the top query's copy of unchanging state such as
-        * the snapshot, rangetable, result-rel info, and external Param info.
-        * They need their own copies of local state, including a tuple table,
-        * es_param_exec_vals, etc.
-        */
-       epqstate->es_direction = ForwardScanDirection;
-       epqstate->es_snapshot = estate->es_snapshot;
-       epqstate->es_crosscheck_snapshot = estate->es_crosscheck_snapshot;
-       epqstate->es_range_table = estate->es_range_table;
-       epqstate->es_result_relations = estate->es_result_relations;
-       epqstate->es_num_result_relations = estate->es_num_result_relations;
-       epqstate->es_result_relation_info = estate->es_result_relation_info;
-       epqstate->es_junkFilter = estate->es_junkFilter;
-       epqstate->es_into_relation_descriptor = estate->es_into_relation_descriptor;
-       epqstate->es_into_relation_use_wal = estate->es_into_relation_use_wal;
-       epqstate->es_param_list_info = estate->es_param_list_info;
-       if (estate->es_topPlan->nParamExec > 0)
-               epqstate->es_param_exec_vals = (ParamExecData *)
-                       palloc0(estate->es_topPlan->nParamExec * sizeof(ParamExecData));
-       epqstate->es_rowMarks = estate->es_rowMarks;
-       epqstate->es_instrument = estate->es_instrument;
-       epqstate->es_select_into = estate->es_select_into;
-       epqstate->es_into_oids = estate->es_into_oids;
-       epqstate->es_topPlan = estate->es_topPlan;
-
-       /*
-        * Each epqstate must have its own es_evTupleNull state, but all the stack
-        * entries share es_evTuple state.      This allows sub-rechecks to inherit
-        * the value being examined by an outer recheck.
-        */
-       epqstate->es_evTupleNull = (bool *) palloc0(rtsize * sizeof(bool));
-       if (priorepq == NULL)
-               /* first PQ stack entry */
-               epqstate->es_evTuple = (HeapTuple *)
-                       palloc0(rtsize * sizeof(HeapTuple));
-       else
-               /* later stack entries share the same storage */
-               epqstate->es_evTuple = priorepq->estate->es_evTuple;
+               myState->rel = NULL;
+       }
+}
+
+/*
+ * CreateIntoRelDestReceiver -- create a suitable DestReceiver object
+ */
+DestReceiver *
+CreateIntoRelDestReceiver(void)
+{
+       DR_intorel *self = (DR_intorel *) palloc0(sizeof(DR_intorel));
 
-       epqstate->es_tupleTable =
-               ExecCreateTupleTable(estate->es_tupleTable->size);
+       self->pub.receiveSlot = intorel_receive;
+       self->pub.rStartup = intorel_startup;
+       self->pub.rShutdown = intorel_shutdown;
+       self->pub.rDestroy = intorel_destroy;
+       self->pub.mydest = DestIntoRel;
 
-       epq->planstate = ExecInitNode(estate->es_topPlan, epqstate, 0);
+       /* private fields will be set by OpenIntoRel */
 
-       MemoryContextSwitchTo(oldcontext);
+       return (DestReceiver *) self;
 }
 
 /*
- * End execution of one level of PlanQual.
- *
- * This is a cut-down version of ExecutorEnd(); basically we want to do most
- * of the normal cleanup, but *not* close result relations (which we are
- * just sharing from the outer query).
+ * intorel_startup --- executor startup
  */
 static void
-EvalPlanQualStop(evalPlanQual *epq)
+intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
 {
-       EState     *epqstate = epq->estate;
-       MemoryContext oldcontext;
+       /* no-op */
+}
 
-       oldcontext = MemoryContextSwitchTo(epqstate->es_query_cxt);
+/*
+ * intorel_receive --- receive one tuple
+ */
+static void
+intorel_receive(TupleTableSlot *slot, DestReceiver *self)
+{
+       DR_intorel *myState = (DR_intorel *) self;
+       HeapTuple       tuple;
 
-       ExecEndNode(epq->planstate);
+       /*
+        * get the heap tuple out of the tuple table slot, making sure we have a
+        * writable copy
+        */
+       tuple = ExecMaterializeSlot(slot);
 
-       ExecDropTupleTable(epqstate->es_tupleTable, true);
-       epqstate->es_tupleTable = NULL;
+       /*
+        * force assignment of new OID (see comments in ExecInsert)
+        */
+       if (myState->rel->rd_rel->relhasoids)
+               HeapTupleSetOid(tuple, InvalidOid);
 
-       if (epqstate->es_evTuple[epq->rti - 1] != NULL)
-       {
-               heap_freetuple(epqstate->es_evTuple[epq->rti - 1]);
-               epqstate->es_evTuple[epq->rti - 1] = NULL;
-       }
+       heap_insert(myState->rel,
+                               tuple,
+                               myState->estate->es_output_cid,
+                               myState->hi_options,
+                               myState->bistate);
 
-       MemoryContextSwitchTo(oldcontext);
+       /* We know this is a newly created relation, so there are no indexes */
+}
 
-       FreeExecutorState(epqstate);
+/*
+ * intorel_shutdown --- executor end
+ */
+static void
+intorel_shutdown(DestReceiver *self)
+{
+       /* no-op */
+}
 
-       epq->estate = NULL;
-       epq->planstate = NULL;
+/*
+ * intorel_destroy --- release DestReceiver object
+ */
+static void
+intorel_destroy(DestReceiver *self)
+{
+       pfree(self);
 }