
Fix bugs in relpersistence handling during table creation.
[pg-rex/syncrep.git] src/backend/executor/execMain.c
index 075f69c..eacd863 100644
@@ -6,34 +6,39 @@
  * INTERFACE ROUTINES
  *     ExecutorStart()
  *     ExecutorRun()
+ *     ExecutorFinish()
  *     ExecutorEnd()
  *
- *     The old ExecutorMain() has been replaced by ExecutorStart(),
- *     ExecutorRun() and ExecutorEnd()
- *
- *     These three procedures are the external interfaces to the executor.
+ *     These four procedures are the external interface to the executor.
  *     In each case, the query descriptor is required as an argument.
  *
- *     ExecutorStart() must be called at the beginning of execution of any
- *     query plan and ExecutorEnd() should always be called at the end of
- *     execution of a plan.
+ *     ExecutorStart must be called at the beginning of execution of any
+ *     query plan and ExecutorEnd must always be called at the end of
+ *     execution of a plan (unless it is aborted due to error).
  *
  *     ExecutorRun accepts direction and count arguments that specify whether
  *     the plan is to be executed forwards, backwards, and for how many tuples.
+ *     In some cases ExecutorRun may be called multiple times to process all
+ *     the tuples for a plan.  It is also acceptable to stop short of executing
+ *     the whole plan (but only if it is a SELECT).
+ *
+ *     ExecutorFinish must be called after the final ExecutorRun call and
+ *     before ExecutorEnd.  This can be omitted only in case of EXPLAIN,
+ *     which should also omit ExecutorRun.
  *
- * Portions Copyright (c) 1996-2008, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  *
  * IDENTIFICATION
- *       $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.308 2008/05/12 00:00:48 alvherre Exp $
+ *       src/backend/executor/execMain.c
  *
  *-------------------------------------------------------------------------
  */
 #include "postgres.h"
 
-#include "access/heapam.h"
 #include "access/reloptions.h"
+#include "access/sysattr.h"
 #include "access/transam.h"
 #include "access/xact.h"
 #include "catalog/heap.h"
@@ -43,7 +48,6 @@
 #include "commands/trigger.h"
 #include "executor/execdebug.h"
 #include "executor/instrument.h"
-#include "executor/nodeSubplan.h"
 #include "miscadmin.h"
 #include "optimizer/clauses.h"
 #include "parser/parse_clause.h"
 #include "storage/bufmgr.h"
 #include "storage/lmgr.h"
 #include "storage/smgr.h"
+#include "tcop/utility.h"
 #include "utils/acl.h"
 #include "utils/lsyscache.h"
 #include "utils/memutils.h"
+#include "utils/snapmgr.h"
 #include "utils/tqual.h"
 
 
-typedef struct evalPlanQual
-{
-       Index           rti;
-       EState     *estate;
-       PlanState  *planstate;
-       struct evalPlanQual *next;      /* stack of active PlanQual plans */
-       struct evalPlanQual *free;      /* list of free PlanQual plans */
-} evalPlanQual;
+/* Hooks for plugins to get control in ExecutorStart/Run/Finish/End */
+ExecutorStart_hook_type ExecutorStart_hook = NULL;
+ExecutorRun_hook_type ExecutorRun_hook = NULL;
+ExecutorFinish_hook_type ExecutorFinish_hook = NULL;
+ExecutorEnd_hook_type ExecutorEnd_hook = NULL;
+
+/* Hook for plugin to get control in ExecCheckRTPerms() */
+ExecutorCheckPerms_hook_type ExecutorCheckPerms_hook = NULL;
 
 /* decls for local routines only used within this module */
 static void InitPlan(QueryDesc *queryDesc, int eflags);
+static void CheckValidRowMarkRel(Relation rel, RowMarkType markType);
+static void ExecPostprocessPlan(EState *estate);
 static void ExecEndPlan(PlanState *planstate, EState *estate);
-static TupleTableSlot *ExecutePlan(EState *estate, PlanState *planstate,
+static void ExecutePlan(EState *estate, PlanState *planstate,
                        CmdType operation,
+                       bool sendTuples,
                        long numberTuples,
                        ScanDirection direction,
                        DestReceiver *dest);
-static void ExecSelect(TupleTableSlot *slot,
-                  DestReceiver *dest, EState *estate);
-static void ExecInsert(TupleTableSlot *slot, ItemPointer tupleid,
-                  TupleTableSlot *planSlot,
-                  DestReceiver *dest, EState *estate);
-static void ExecDelete(ItemPointer tupleid,
-                  TupleTableSlot *planSlot,
-                  DestReceiver *dest, EState *estate);
-static void ExecUpdate(TupleTableSlot *slot, ItemPointer tupleid,
-                  TupleTableSlot *planSlot,
-                  DestReceiver *dest, EState *estate);
-static void ExecProcessReturning(ProjectionInfo *projectReturning,
-                                        TupleTableSlot *tupleSlot,
-                                        TupleTableSlot *planSlot,
-                                        DestReceiver *dest);
-static TupleTableSlot *EvalPlanQualNext(EState *estate);
-static void EndEvalPlanQual(EState *estate);
-static void ExecCheckRTPerms(List *rangeTable);
-static void ExecCheckRTEPerms(RangeTblEntry *rte);
+static bool ExecCheckRTEPerms(RangeTblEntry *rte);
 static void ExecCheckXactReadOnly(PlannedStmt *plannedstmt);
-static void EvalPlanQualStart(evalPlanQual *epq, EState *estate,
-                                 evalPlanQual *priorepq);
-static void EvalPlanQualStop(evalPlanQual *epq);
+static void EvalPlanQualStart(EPQState *epqstate, EState *parentestate,
+                                 Plan *planTree);
 static void OpenIntoRel(QueryDesc *queryDesc);
 static void CloseIntoRel(QueryDesc *queryDesc);
 static void intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo);
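
The hook variables declared above follow the usual PostgreSQL loadable-module pattern: save the previous hook value, install your own function, and chain to the standard_ implementation. A sketch of such a module (module and function names are illustrative; this mirrors how contrib modules such as auto_explain use these hooks):

#include "postgres.h"
#include "fmgr.h"
#include "executor/executor.h"

PG_MODULE_MAGIC;

static ExecutorStart_hook_type prev_ExecutorStart = NULL;

static void
my_ExecutorStart(QueryDesc *queryDesc, int eflags)
{
    /* module-specific setup could go here */
    if (prev_ExecutorStart)
        prev_ExecutorStart(queryDesc, eflags);
    else
        standard_ExecutorStart(queryDesc, eflags);
}

void
_PG_init(void)
{
    prev_ExecutorStart = ExecutorStart_hook;
    ExecutorStart_hook = my_ExecutorStart;
}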
@@ -113,8 +103,8 @@ static void intorel_destroy(DestReceiver *self);
  *             This routine must be called at the beginning of any execution of any
  *             query plan
  *
- * Takes a QueryDesc previously created by CreateQueryDesc (it's not real
- * clear why we bother to separate the two functions, but...). The tupDesc
+ * Takes a QueryDesc previously created by CreateQueryDesc (which is separate
+ * only because some places use QueryDescs for utility commands).  The tupDesc
  * field of the QueryDesc is filled in to describe the tuples that will be
  * returned, and the internal fields (estate and planstate) are set up.
  *
@@ -122,11 +112,25 @@ static void intorel_destroy(DestReceiver *self);
  *
  * NB: the CurrentMemoryContext when this is called will become the parent
  * of the per-query context used for this Executor invocation.
+ *
+ * We provide a function hook variable that lets loadable plugins
+ * get control when ExecutorStart is called.  Such a plugin would
+ * normally call standard_ExecutorStart().
+ *
  * ----------------------------------------------------------------
  */
 void
 ExecutorStart(QueryDesc *queryDesc, int eflags)
 {
+       if (ExecutorStart_hook)
+               (*ExecutorStart_hook) (queryDesc, eflags);
+       else
+               standard_ExecutorStart(queryDesc, eflags);
+}
+
+void
+standard_ExecutorStart(QueryDesc *queryDesc, int eflags)
+{
        EState     *estate;
        MemoryContext oldcontext;
 
@@ -150,7 +154,8 @@ ExecutorStart(QueryDesc *queryDesc, int eflags)
        oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
 
        /*
-        * Fill in parameters, if any, from queryDesc
+        * Fill in external parameters, if any, from queryDesc; and allocate
+        * workspace for internal parameters
         */
        estate->es_param_list_info = queryDesc->params;
 
@@ -164,10 +169,24 @@ ExecutorStart(QueryDesc *queryDesc, int eflags)
        switch (queryDesc->operation)
        {
                case CMD_SELECT:
-                       /* SELECT INTO and SELECT FOR UPDATE/SHARE need to mark tuples */
+
+                       /*
+                        * SELECT INTO, SELECT FOR UPDATE/SHARE and modifying CTEs need to
+                        * mark tuples
+                        */
                        if (queryDesc->plannedstmt->intoClause != NULL ||
-                               queryDesc->plannedstmt->rowMarks != NIL)
+                               queryDesc->plannedstmt->rowMarks != NIL ||
+                               queryDesc->plannedstmt->hasModifyingCTE)
                                estate->es_output_cid = GetCurrentCommandId(true);
+
+                       /*
+                        * A SELECT without modifying CTEs can't possibly queue triggers,
+                        * so force skip-triggers mode. This is just a marginal efficiency
+                        * hack, since AfterTriggerBeginQuery/AfterTriggerEndQuery aren't
+                        * all that expensive, but we might as well do it.
+                        */
+                       if (!queryDesc->plannedstmt->hasModifyingCTE)
+                               eflags |= EXEC_FLAG_SKIP_TRIGGERS;
                        break;
 
                case CMD_INSERT:
@@ -185,15 +204,23 @@ ExecutorStart(QueryDesc *queryDesc, int eflags)
        /*
         * Copy other important information into the EState
         */
-       estate->es_snapshot = queryDesc->snapshot;
-       estate->es_crosscheck_snapshot = queryDesc->crosscheck_snapshot;
-       estate->es_instrument = queryDesc->doInstrument;
+       estate->es_snapshot = RegisterSnapshot(queryDesc->snapshot);
+       estate->es_crosscheck_snapshot = RegisterSnapshot(queryDesc->crosscheck_snapshot);
+       estate->es_top_eflags = eflags;
+       estate->es_instrument = queryDesc->instrument_options;
 
        /*
         * Initialize the plan state tree
         */
        InitPlan(queryDesc, eflags);
 
+       /*
+        * Set up an AFTER-trigger statement context, unless told not to, or
+        * unless it's EXPLAIN-only mode (when ExecutorFinish won't be called).
+        */
+       if (!(eflags & (EXEC_FLAG_SKIP_TRIGGERS | EXEC_FLAG_EXPLAIN_ONLY)))
+               AfterTriggerBeginQuery();
+
        MemoryContextSwitchTo(oldcontext);
 }
 
@@ -213,17 +240,35 @@ ExecutorStart(QueryDesc *queryDesc, int eflags)
  *             Note: count = 0 is interpreted as no portal limit, i.e., run to
  *             completion.
  *
+ *             There is no return value, but output tuples (if any) are sent to
+ *             the destination receiver specified in the QueryDesc; and the number
+ *             of tuples processed at the top level can be found in
+ *             estate->es_processed.
+ *
+ *             We provide a function hook variable that lets loadable plugins
+ *             get control when ExecutorRun is called.  Such a plugin would
+ *             normally call standard_ExecutorRun().
+ *
  * ----------------------------------------------------------------
  */
-TupleTableSlot *
+void
 ExecutorRun(QueryDesc *queryDesc,
                        ScanDirection direction, long count)
 {
+       if (ExecutorRun_hook)
+               (*ExecutorRun_hook) (queryDesc, direction, count);
+       else
+               standard_ExecutorRun(queryDesc, direction, count);
+}
+
+void
+standard_ExecutorRun(QueryDesc *queryDesc,
+                                        ScanDirection direction, long count)
+{
        EState     *estate;
        CmdType         operation;
        DestReceiver *dest;
        bool            sendTuples;
-       TupleTableSlot *result;
        MemoryContext oldcontext;
 
        /* sanity checks */
@@ -232,12 +277,17 @@ ExecutorRun(QueryDesc *queryDesc,
        estate = queryDesc->estate;
 
        Assert(estate != NULL);
+       Assert(!(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));
 
        /*
         * Switch into per-query memory context
         */
        oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
 
+       /* Allow instrumentation of Executor overall runtime */
+       if (queryDesc->totaltime)
+               InstrStartNode(queryDesc->totaltime);
+
        /*
         * extract information from the query descriptor and the query feature.
         */
@@ -251,7 +301,7 @@ ExecutorRun(QueryDesc *queryDesc,
        estate->es_lastoid = InvalidOid;
 
        sendTuples = (operation == CMD_SELECT ||
-                                 queryDesc->plannedstmt->returningLists);
+                                 queryDesc->plannedstmt->hasReturning);
 
        if (sendTuples)
                (*dest->rStartup) (dest, operation, queryDesc->tupDesc);
@@ -259,15 +309,14 @@ ExecutorRun(QueryDesc *queryDesc,
        /*
         * run plan
         */
-       if (ScanDirectionIsNoMovement(direction))
-               result = NULL;
-       else
-               result = ExecutePlan(estate,
-                                                        queryDesc->planstate,
-                                                        operation,
-                                                        count,
-                                                        direction,
-                                                        dest);
+       if (!ScanDirectionIsNoMovement(direction))
+               ExecutePlan(estate,
+                                       queryDesc->planstate,
+                                       operation,
+                                       sendTuples,
+                                       count,
+                                       direction,
+                                       dest);
 
        /*
         * shutdown tuple receiver, if we started it
@@ -275,9 +324,72 @@ ExecutorRun(QueryDesc *queryDesc,
        if (sendTuples)
                (*dest->rShutdown) (dest);
 
+       if (queryDesc->totaltime)
+               InstrStopNode(queryDesc->totaltime, estate->es_processed);
+
        MemoryContextSwitchTo(oldcontext);
+}
 
-       return result;
+/* ----------------------------------------------------------------
+ *             ExecutorFinish
+ *
+ *             This routine must be called after the last ExecutorRun call.
+ *             It performs cleanup such as firing AFTER triggers.      It is
+ *             separate from ExecutorEnd because EXPLAIN ANALYZE needs to
+ *             include these actions in the total runtime.
+ *
+ *             We provide a function hook variable that lets loadable plugins
+ *             get control when ExecutorFinish is called.      Such a plugin would
+ *             normally call standard_ExecutorFinish().
+ *
+ * ----------------------------------------------------------------
+ */
+void
+ExecutorFinish(QueryDesc *queryDesc)
+{
+       if (ExecutorFinish_hook)
+               (*ExecutorFinish_hook) (queryDesc);
+       else
+               standard_ExecutorFinish(queryDesc);
+}
+
+void
+standard_ExecutorFinish(QueryDesc *queryDesc)
+{
+       EState     *estate;
+       MemoryContext oldcontext;
+
+       /* sanity checks */
+       Assert(queryDesc != NULL);
+
+       estate = queryDesc->estate;
+
+       Assert(estate != NULL);
+       Assert(!(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));
+
+       /* This should be run once and only once per Executor instance */
+       Assert(!estate->es_finished);
+
+       /* Switch into per-query memory context */
+       oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
+
+       /* Allow instrumentation of Executor overall runtime */
+       if (queryDesc->totaltime)
+               InstrStartNode(queryDesc->totaltime);
+
+       /* Run ModifyTable nodes to completion */
+       ExecPostprocessPlan(estate);
+
+       /* Execute queued AFTER triggers, unless told not to */
+       if (!(estate->es_top_eflags & EXEC_FLAG_SKIP_TRIGGERS))
+               AfterTriggerEndQuery(estate);
+
+       if (queryDesc->totaltime)
+               InstrStopNode(queryDesc->totaltime, 0);
+
+       MemoryContextSwitchTo(oldcontext);
+
+       estate->es_finished = true;
 }
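
queryDesc->totaltime is bracketed with InstrStartNode/InstrStopNode in both standard_ExecutorRun and standard_ExecutorFinish, so an instrumentation consumer sees AFTER-trigger time included in the total. A sketch of how such a consumer might set it up (loosely modeled on what EXPLAIN ANALYZE does, not copied from explain.c):

/* Sketch: accumulate total executor runtime, including ExecutorFinish work. */
Instrumentation *totaltime = InstrAlloc(1, INSTRUMENT_ALL);

queryDesc->totaltime = totaltime;
ExecutorStart(queryDesc, 0);
ExecutorRun(queryDesc, ForwardScanDirection, 0L);
ExecutorFinish(queryDesc);          /* trigger and ModifyTable cleanup time counted too */
InstrEndLoop(totaltime);            /* fold the counters into totaltime->total */
ExecutorEnd(queryDesc);             /* note: this resets queryDesc->totaltime to NULL */

/* totaltime->total now holds the accumulated runtime in seconds */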
 
 /* ----------------------------------------------------------------
@@ -285,11 +397,25 @@ ExecutorRun(QueryDesc *queryDesc,
  *
  *             This routine must be called at the end of execution of any
  *             query plan
+ *
+ *             We provide a function hook variable that lets loadable plugins
+ *             get control when ExecutorEnd is called.  Such a plugin would
+ *             normally call standard_ExecutorEnd().
+ *
  * ----------------------------------------------------------------
  */
 void
 ExecutorEnd(QueryDesc *queryDesc)
 {
+       if (ExecutorEnd_hook)
+               (*ExecutorEnd_hook) (queryDesc);
+       else
+               standard_ExecutorEnd(queryDesc);
+}
+
+void
+standard_ExecutorEnd(QueryDesc *queryDesc)
+{
        EState     *estate;
        MemoryContext oldcontext;
 
@@ -301,6 +427,14 @@ ExecutorEnd(QueryDesc *queryDesc)
        Assert(estate != NULL);
 
        /*
+        * Check that ExecutorFinish was called, unless in EXPLAIN-only mode. This
+        * Assert is needed because ExecutorFinish is new as of 9.1, and callers
+        * might forget to call it.
+        */
+       Assert(estate->es_finished ||
+                  (estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));
+
+       /*
         * Switch into per-query memory context to run ExecEndPlan
         */
        oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
@@ -313,6 +447,10 @@ ExecutorEnd(QueryDesc *queryDesc)
        if (estate->es_select_into)
                CloseIntoRel(queryDesc);
 
+       /* do away with our snapshots */
+       UnregisterSnapshot(estate->es_snapshot);
+       UnregisterSnapshot(estate->es_crosscheck_snapshot);
+
        /*
         * Must switch out of context before destroying it
         */
@@ -328,6 +466,7 @@ ExecutorEnd(QueryDesc *queryDesc)
        queryDesc->tupDesc = NULL;
        queryDesc->estate = NULL;
        queryDesc->planstate = NULL;
+       queryDesc->totaltime = NULL;
 }
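
The UnregisterSnapshot calls here balance the RegisterSnapshot calls added in standard_ExecutorStart; both functions treat a NULL snapshot as a no-op, which covers the usual case of a NULL crosscheck snapshot. A minimal illustration of the pairing rule using snapmgr's public calls (the helper itself is hypothetical):

#include "utils/snapmgr.h"

/* Hypothetical helper showing the register/unregister discipline used above. */
static void
use_registered_snapshot(void)
{
    Snapshot    snap;

    snap = RegisterSnapshot(GetTransactionSnapshot());
    /* ... snapshot remains valid while registered ... */
    UnregisterSnapshot(snap);   /* must match the RegisterSnapshot call */
}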
 
 /* ----------------------------------------------------------------
@@ -361,7 +500,7 @@ ExecutorRewind(QueryDesc *queryDesc)
        /*
         * rescan plan
         */
-       ExecReScan(queryDesc->planstate, NULL);
+       ExecReScan(queryDesc->planstate);
 
        MemoryContextSwitchTo(oldcontext);
 }
@@ -370,28 +509,51 @@ ExecutorRewind(QueryDesc *queryDesc)
 /*
  * ExecCheckRTPerms
  *             Check access permissions for all relations listed in a range table.
+ *
+ * Returns true if permissions are adequate.  Otherwise, throws an appropriate
+ * error if ereport_on_violation is true, or simply returns false otherwise.
  */
-static void
-ExecCheckRTPerms(List *rangeTable)
+bool
+ExecCheckRTPerms(List *rangeTable, bool ereport_on_violation)
 {
        ListCell   *l;
+       bool            result = true;
 
        foreach(l, rangeTable)
        {
-               ExecCheckRTEPerms((RangeTblEntry *) lfirst(l));
+               RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
+
+               result = ExecCheckRTEPerms(rte);
+               if (!result)
+               {
+                       Assert(rte->rtekind == RTE_RELATION);
+                       if (ereport_on_violation)
+                               aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
+                                                          get_rel_name(rte->relid));
+                       return false;
+               }
        }
+
+       if (ExecutorCheckPerms_hook)
+               result = (*ExecutorCheckPerms_hook) (rangeTable,
+                                                                                        ereport_on_violation);
+       return result;
 }
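
ExecutorCheckPerms_hook gives loadable modules (for example, external security providers) a veto over the range table after the built-in ACL checks pass. A sketch of a hook that forbids access to one relation OID; the OID variable is purely illustrative:

static Oid  forbidden_relid = InvalidOid;   /* hypothetical example OID */

static bool
my_check_perms(List *rangeTable, bool ereport_on_violation)
{
    ListCell   *l;

    foreach(l, rangeTable)
    {
        RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);

        if (rte->rtekind == RTE_RELATION && rte->relid == forbidden_relid)
        {
            if (ereport_on_violation)
                aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
                               get_rel_name(rte->relid));
            return false;
        }
    }
    return true;
}
/* installed from _PG_init():  ExecutorCheckPerms_hook = my_check_perms; */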
 
 /*
  * ExecCheckRTEPerms
  *             Check access permissions for a single RTE.
  */
-static void
+static bool
 ExecCheckRTEPerms(RangeTblEntry *rte)
 {
        AclMode         requiredPerms;
+       AclMode         relPerms;
+       AclMode         remainingPerms;
        Oid                     relOid;
        Oid                     userid;
+       Bitmapset  *tmpset;
+       int                     col;
 
        /*
         * Only plain-relation RTEs need to be checked here.  Function RTEs are
@@ -399,14 +561,14 @@ ExecCheckRTEPerms(RangeTblEntry *rte)
         * Join, subquery, and special RTEs need no checks.
         */
        if (rte->rtekind != RTE_RELATION)
-               return;
+               return true;
 
        /*
         * No work if requiredPerms is empty.
         */
        requiredPerms = rte->requiredPerms;
        if (requiredPerms == 0)
-               return;
+               return true;
 
        relOid = rte->relid;
 
@@ -421,16 +583,113 @@ ExecCheckRTEPerms(RangeTblEntry *rte)
        userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
 
        /*
-        * We must have *all* the requiredPerms bits, so use aclmask not aclcheck.
+        * We must have *all* the requiredPerms bits, but some of the bits can be
+        * satisfied from column-level rather than relation-level permissions.
+        * First, remove any bits that are satisfied by relation permissions.
         */
-       if (pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL)
-               != requiredPerms)
-               aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
-                                          get_rel_name(relOid));
+       relPerms = pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL);
+       remainingPerms = requiredPerms & ~relPerms;
+       if (remainingPerms != 0)
+       {
+               /*
+                * If we lack any permissions that exist only as relation permissions,
+                * we can fail straight away.
+                */
+               if (remainingPerms & ~(ACL_SELECT | ACL_INSERT | ACL_UPDATE))
+                       return false;
+
+               /*
+                * Check to see if we have the needed privileges at column level.
+                *
+                * Note: failures just report a table-level error; it would be nicer
+                * to report a column-level error if we have some but not all of the
+                * column privileges.
+                */
+               if (remainingPerms & ACL_SELECT)
+               {
+                       /*
+                        * When the query doesn't explicitly reference any columns (for
+                        * example, SELECT COUNT(*) FROM table), allow the query if we
+                        * have SELECT on any column of the rel, as per SQL spec.
+                        */
+                       if (bms_is_empty(rte->selectedCols))
+                       {
+                               if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
+                                                                                         ACLMASK_ANY) != ACLCHECK_OK)
+                                       return false;
+                       }
+
+                       tmpset = bms_copy(rte->selectedCols);
+                       while ((col = bms_first_member(tmpset)) >= 0)
+                       {
+                               /* remove the column number offset */
+                               col += FirstLowInvalidHeapAttributeNumber;
+                               if (col == InvalidAttrNumber)
+                               {
+                                       /* Whole-row reference, must have priv on all cols */
+                                       if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
+                                                                                                 ACLMASK_ALL) != ACLCHECK_OK)
+                                               return false;
+                               }
+                               else
+                               {
+                                       if (pg_attribute_aclcheck(relOid, col, userid,
+                                                                                         ACL_SELECT) != ACLCHECK_OK)
+                                               return false;
+                               }
+                       }
+                       bms_free(tmpset);
+               }
+
+               /*
+                * Basically the same for the mod columns, with either INSERT or
+                * UPDATE privilege as specified by remainingPerms.
+                */
+               remainingPerms &= ~ACL_SELECT;
+               if (remainingPerms != 0)
+               {
+                       /*
+                        * When the query doesn't explicitly change any columns, allow the
+                        * query if we have permission on any column of the rel.  This is
+                        * to handle SELECT FOR UPDATE as well as possible corner cases in
+                        * INSERT and UPDATE.
+                        */
+                       if (bms_is_empty(rte->modifiedCols))
+                       {
+                               if (pg_attribute_aclcheck_all(relOid, userid, remainingPerms,
+                                                                                         ACLMASK_ANY) != ACLCHECK_OK)
+                                       return false;
+                       }
+
+                       tmpset = bms_copy(rte->modifiedCols);
+                       while ((col = bms_first_member(tmpset)) >= 0)
+                       {
+                               /* remove the column number offset */
+                               col += FirstLowInvalidHeapAttributeNumber;
+                               if (col == InvalidAttrNumber)
+                               {
+                                       /* whole-row reference can't happen here */
+                                       elog(ERROR, "whole-row update is not implemented");
+                               }
+                               else
+                               {
+                                       if (pg_attribute_aclcheck(relOid, col, userid,
+                                                                                         remainingPerms) != ACLCHECK_OK)
+                                               return false;
+                               }
+                       }
+                       bms_free(tmpset);
+               }
+       }
+       return true;
 }
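
The column bitmaps consulted above store attribute numbers shifted by FirstLowInvalidHeapAttributeNumber, so that system columns and the whole-row reference (attnum 0) fit in a bitmapset of non-negative members. A small sketch of the encode/decode convention the loop relies on (the helper and its arguments are hypothetical):

/* Sketch of the offset convention used by selectedCols/modifiedCols.
 * Headers involved: access/sysattr.h, nodes/bitmapset.h. */
static void
mark_column_selected(RangeTblEntry *rte, AttrNumber attnum)
{
    /* encode: shift so system columns and attnum 0 (whole row) are >= 0 */
    rte->selectedCols = bms_add_member(rte->selectedCols,
                                       attnum - FirstLowInvalidHeapAttributeNumber);
}

/* decode (as in the loop above): col + FirstLowInvalidHeapAttributeNumber
 * recovers the attribute number; a result of InvalidAttrNumber (0) means a
 * whole-row reference. */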
 
 /*
  * Check that the query does not imply any writes to non-temp tables.
+ *
+ * Note: in a Hot Standby slave this would need to reject writes to temp
+ * tables as well; but an HS slave can't have created any temp tables
+ * in the first place, so no need to check that.
  */
 static void
 ExecCheckXactReadOnly(PlannedStmt *plannedstmt)
@@ -440,10 +699,11 @@ ExecCheckXactReadOnly(PlannedStmt *plannedstmt)
        /*
         * CREATE TABLE AS or SELECT INTO?
         *
-        * XXX should we allow this if the destination is temp?
+        * XXX should we allow this if the destination is temp?  Considering that
+        * it would still require catalog changes, probably not.
         */
        if (plannedstmt->intoClause != NULL)
-               goto fail;
+               PreventCommandIfReadOnly(CreateCommandTag((Node *) plannedstmt));
 
        /* Fail if write permissions are requested on any non-temp table */
        foreach(l, plannedstmt->rtable)
@@ -459,15 +719,8 @@ ExecCheckXactReadOnly(PlannedStmt *plannedstmt)
                if (isTempNamespace(get_rel_namespace(rte->relid)))
                        continue;
 
-               goto fail;
+               PreventCommandIfReadOnly(CreateCommandTag((Node *) plannedstmt));
        }
-
-       return;
-
-fail:
-       ereport(ERROR,
-                       (errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION),
-                        errmsg("transaction is read-only")));
 }
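
PreventCommandIfReadOnly() (from tcop/utility.h, newly included at the top of the file) replaces the old generic "transaction is read-only" error with one that names the offending command. Roughly, each call site above behaves like the following sketch; this is an approximation, not the actual tcop/utility.c source:

/* Approximate effect of PreventCommandIfReadOnly(CreateCommandTag(...)) above. */
if (XactReadOnly)
    ereport(ERROR,
            (errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION),
             errmsg("cannot execute %s in a read-only transaction",
                    CreateCommandTag((Node *) plannedstmt))));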
 
 
@@ -494,15 +747,19 @@ InitPlan(QueryDesc *queryDesc, int eflags)
        /*
         * Do permissions checks
         */
-       ExecCheckRTPerms(rangeTable);
+       ExecCheckRTPerms(rangeTable, true);
 
        /*
         * initialize the node's execution state
         */
        estate->es_range_table = rangeTable;
+       estate->es_plannedstmt = plannedstmt;
 
        /*
-        * initialize result relation stuff
+        * initialize result relation stuff, and open/lock the result rels.
+        *
+        * We must do this before initializing the plan tree, else we might try to
+        * do a lock upgrade if a result rel is also a source rel.
         */
        if (plannedstmt->resultRelations)
        {
@@ -525,14 +782,13 @@ InitPlan(QueryDesc *queryDesc, int eflags)
                        InitResultRelInfo(resultRelInfo,
                                                          resultRelation,
                                                          resultRelationIndex,
-                                                         operation,
                                                          estate->es_instrument);
                        resultRelInfo++;
                }
                estate->es_result_relations = resultRelInfos;
                estate->es_num_result_relations = numResultRelations;
-               /* Initialize to first or only result rel */
-               estate->es_result_relation_info = resultRelInfos;
+               /* es_result_relation_info is NULL except when within ModifyTable */
+               estate->es_result_relation_info = NULL;
        }
        else
        {
@@ -545,83 +801,81 @@ InitPlan(QueryDesc *queryDesc, int eflags)
        }
 
        /*
-        * Detect whether we're doing SELECT INTO.  If so, set the es_into_oids
-        * flag appropriately so that the plan tree will be initialized with the
-        * correct tuple descriptors.  (Other SELECT INTO stuff comes later.)
-        */
-       estate->es_select_into = false;
-       if (operation == CMD_SELECT && plannedstmt->intoClause != NULL)
-       {
-               estate->es_select_into = true;
-               estate->es_into_oids = interpretOidsOption(plannedstmt->intoClause->options);
-       }
-
-       /*
-        * Have to lock relations selected FOR UPDATE/FOR SHARE before we
-        * initialize the plan tree, else we'd be doing a lock upgrade. While we
-        * are at it, build the ExecRowMark list.
+        * Similarly, we have to lock relations selected FOR UPDATE/FOR SHARE
+        * before we initialize the plan tree, else we'd be risking lock upgrades.
+        * While we are at it, build the ExecRowMark list.
         */
        estate->es_rowMarks = NIL;
        foreach(l, plannedstmt->rowMarks)
        {
-               RowMarkClause *rc = (RowMarkClause *) lfirst(l);
-               Oid                     relid = getrelid(rc->rti, rangeTable);
+               PlanRowMark *rc = (PlanRowMark *) lfirst(l);
+               Oid                     relid;
                Relation        relation;
                ExecRowMark *erm;
 
-               relation = heap_open(relid, RowShareLock);
+               /* ignore "parent" rowmarks; they are irrelevant at runtime */
+               if (rc->isParent)
+                       continue;
+
+               switch (rc->markType)
+               {
+                       case ROW_MARK_EXCLUSIVE:
+                       case ROW_MARK_SHARE:
+                               relid = getrelid(rc->rti, rangeTable);
+                               relation = heap_open(relid, RowShareLock);
+                               break;
+                       case ROW_MARK_REFERENCE:
+                               relid = getrelid(rc->rti, rangeTable);
+                               relation = heap_open(relid, AccessShareLock);
+                               break;
+                       case ROW_MARK_COPY:
+                               /* there's no real table here ... */
+                               relation = NULL;
+                               break;
+                       default:
+                               elog(ERROR, "unrecognized markType: %d", rc->markType);
+                               relation = NULL;        /* keep compiler quiet */
+                               break;
+               }
+
+               /* Check that relation is a legal target for marking */
+               if (relation)
+                       CheckValidRowMarkRel(relation, rc->markType);
+
                erm = (ExecRowMark *) palloc(sizeof(ExecRowMark));
                erm->relation = relation;
                erm->rti = rc->rti;
-               erm->forUpdate = rc->forUpdate;
+               erm->prti = rc->prti;
+               erm->rowmarkId = rc->rowmarkId;
+               erm->markType = rc->markType;
                erm->noWait = rc->noWait;
-               /* We'll set up ctidAttno below */
-               erm->ctidAttNo = InvalidAttrNumber;
+               ItemPointerSetInvalid(&(erm->curCtid));
                estate->es_rowMarks = lappend(estate->es_rowMarks, erm);
        }
 
        /*
-        * Initialize the executor "tuple" table.  We need slots for all the plan
-        * nodes, plus possibly output slots for the junkfilter(s). At this point
-        * we aren't sure if we need junkfilters, so just add slots for them
-        * unconditionally.  Also, if it's not a SELECT, set up a slot for use for
-        * trigger output tuples.  Also, one for RETURNING-list evaluation.
+        * Detect whether we're doing SELECT INTO.  If so, set the es_into_oids
+        * flag appropriately so that the plan tree will be initialized with the
+        * correct tuple descriptors.  (Other SELECT INTO stuff comes later.)
         */
+       estate->es_select_into = false;
+       if (operation == CMD_SELECT && plannedstmt->intoClause != NULL)
        {
-               int                     nSlots;
-
-               /* Slots for the main plan tree */
-               nSlots = ExecCountSlotsNode(plan);
-               /* Add slots for subplans and initplans */
-               foreach(l, plannedstmt->subplans)
-               {
-                       Plan       *subplan = (Plan *) lfirst(l);
-
-                       nSlots += ExecCountSlotsNode(subplan);
-               }
-               /* Add slots for junkfilter(s) */
-               if (plannedstmt->resultRelations != NIL)
-                       nSlots += list_length(plannedstmt->resultRelations);
-               else
-                       nSlots += 1;
-               if (operation != CMD_SELECT)
-                       nSlots++;                       /* for es_trig_tuple_slot */
-               if (plannedstmt->returningLists)
-                       nSlots++;                       /* for RETURNING projection */
-
-               estate->es_tupleTable = ExecCreateTupleTable(nSlots);
-
-               if (operation != CMD_SELECT)
-                       estate->es_trig_tuple_slot =
-                               ExecAllocTableSlot(estate->es_tupleTable);
+               estate->es_select_into = true;
+               estate->es_into_oids = interpretOidsOption(plannedstmt->intoClause->options);
        }
 
+       /*
+        * Initialize the executor's tuple table to empty.
+        */
+       estate->es_tupleTable = NIL;
+       estate->es_trig_tuple_slot = NULL;
+       estate->es_trig_oldtup_slot = NULL;
+
        /* mark EvalPlanQual not active */
-       estate->es_plannedstmt = plannedstmt;
-       estate->es_evalPlanQual = NULL;
-       estate->es_evTupleNull = NULL;
-       estate->es_evTuple = NULL;
-       estate->es_useEvalPlan = false;
+       estate->es_epqTuple = NULL;
+       estate->es_epqTupleSet = NULL;
+       estate->es_epqScanDone = NULL;
 
        /*
         * Initialize private state information for each SubPlan.  We must do this
@@ -668,190 +922,36 @@ InitPlan(QueryDesc *queryDesc, int eflags)
        tupType = ExecGetResultType(planstate);
 
        /*
-        * Initialize the junk filter if needed.  SELECT and INSERT queries need a
-        * filter if there are any junk attrs in the tlist.  INSERT and SELECT
-        * INTO also need a filter if the plan may return raw disk tuples (else
-        * heap_insert will be scribbling on the source relation!). UPDATE and
-        * DELETE always need a filter, since there's always a junk 'ctid'
-        * attribute present --- no need to look first.
+        * Initialize the junk filter if needed.  SELECT queries need a filter if
+        * there are any junk attrs in the top-level tlist.
         */
+       if (operation == CMD_SELECT)
        {
                bool            junk_filter_needed = false;
                ListCell   *tlist;
 
-               switch (operation)
+               foreach(tlist, plan->targetlist)
                {
-                       case CMD_SELECT:
-                       case CMD_INSERT:
-                               foreach(tlist, plan->targetlist)
-                               {
-                                       TargetEntry *tle = (TargetEntry *) lfirst(tlist);
+                       TargetEntry *tle = (TargetEntry *) lfirst(tlist);
 
-                                       if (tle->resjunk)
-                                       {
-                                               junk_filter_needed = true;
-                                               break;
-                                       }
-                               }
-                               if (!junk_filter_needed &&
-                                       (operation == CMD_INSERT || estate->es_select_into) &&
-                                       ExecMayReturnRawTuples(planstate))
-                                       junk_filter_needed = true;
-                               break;
-                       case CMD_UPDATE:
-                       case CMD_DELETE:
+                       if (tle->resjunk)
+                       {
                                junk_filter_needed = true;
                                break;
-                       default:
-                               break;
+                       }
                }
 
                if (junk_filter_needed)
                {
-                       /*
-                        * If there are multiple result relations, each one needs its own
-                        * junk filter.  Note this is only possible for UPDATE/DELETE, so
-                        * we can't be fooled by some needing a filter and some not.
-                        */
-                       if (list_length(plannedstmt->resultRelations) > 1)
-                       {
-                               PlanState **appendplans;
-                               int                     as_nplans;
-                               ResultRelInfo *resultRelInfo;
-
-                               /* Top plan had better be an Append here. */
-                               Assert(IsA(plan, Append));
-                               Assert(((Append *) plan)->isTarget);
-                               Assert(IsA(planstate, AppendState));
-                               appendplans = ((AppendState *) planstate)->appendplans;
-                               as_nplans = ((AppendState *) planstate)->as_nplans;
-                               Assert(as_nplans == estate->es_num_result_relations);
-                               resultRelInfo = estate->es_result_relations;
-                               for (i = 0; i < as_nplans; i++)
-                               {
-                                       PlanState  *subplan = appendplans[i];
-                                       JunkFilter *j;
-
-                                       j = ExecInitJunkFilter(subplan->plan->targetlist,
-                                                       resultRelInfo->ri_RelationDesc->rd_att->tdhasoid,
-                                                                 ExecAllocTableSlot(estate->es_tupleTable));
-
-                                       /*
-                                        * Since it must be UPDATE/DELETE, there had better be a
-                                        * "ctid" junk attribute in the tlist ... but ctid could
-                                        * be at a different resno for each result relation. We
-                                        * look up the ctid resnos now and save them in the
-                                        * junkfilters.
-                                        */
-                                       j->jf_junkAttNo = ExecFindJunkAttribute(j, "ctid");
-                                       if (!AttributeNumberIsValid(j->jf_junkAttNo))
-                                               elog(ERROR, "could not find junk ctid column");
-                                       resultRelInfo->ri_junkFilter = j;
-                                       resultRelInfo++;
-                               }
-
-                               /*
-                                * Set active junkfilter too; at this point ExecInitAppend has
-                                * already selected an active result relation...
-                                */
-                               estate->es_junkFilter =
-                                       estate->es_result_relation_info->ri_junkFilter;
-
-                               /*
-                                * We currently can't support rowmarks in this case, because
-                                * the associated junk CTIDs might have different resnos in
-                                * different subplans.
-                                */
-                               if (estate->es_rowMarks)
-                                       ereport(ERROR,
-                                                       (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-                                                        errmsg("SELECT FOR UPDATE/SHARE is not supported within a query with multiple result relations")));
-                       }
-                       else
-                       {
-                               /* Normal case with just one JunkFilter */
-                               JunkFilter *j;
-
-                               j = ExecInitJunkFilter(planstate->plan->targetlist,
-                                                                          tupType->tdhasoid,
-                                                                 ExecAllocTableSlot(estate->es_tupleTable));
-                               estate->es_junkFilter = j;
-                               if (estate->es_result_relation_info)
-                                       estate->es_result_relation_info->ri_junkFilter = j;
-
-                               if (operation == CMD_SELECT)
-                               {
-                                       /* For SELECT, want to return the cleaned tuple type */
-                                       tupType = j->jf_cleanTupType;
-                               }
-                               else if (operation == CMD_UPDATE || operation == CMD_DELETE)
-                               {
-                                       /* For UPDATE/DELETE, find the ctid junk attr now */
-                                       j->jf_junkAttNo = ExecFindJunkAttribute(j, "ctid");
-                                       if (!AttributeNumberIsValid(j->jf_junkAttNo))
-                                               elog(ERROR, "could not find junk ctid column");
-                               }
-
-                               /* For SELECT FOR UPDATE/SHARE, find the ctid attrs now */
-                               foreach(l, estate->es_rowMarks)
-                               {
-                                       ExecRowMark *erm = (ExecRowMark *) lfirst(l);
-                                       char            resname[32];
-
-                                       snprintf(resname, sizeof(resname), "ctid%u", erm->rti);
-                                       erm->ctidAttNo = ExecFindJunkAttribute(j, resname);
-                                       if (!AttributeNumberIsValid(erm->ctidAttNo))
-                                               elog(ERROR, "could not find junk \"%s\" column",
-                                                        resname);
-                               }
-                       }
-               }
-               else
-               {
-                       estate->es_junkFilter = NULL;
-                       if (estate->es_rowMarks)
-                               elog(ERROR, "SELECT FOR UPDATE/SHARE, but no junk columns");
-               }
-       }
-
-       /*
-        * Initialize RETURNING projections if needed.
-        */
-       if (plannedstmt->returningLists)
-       {
-               TupleTableSlot *slot;
-               ExprContext *econtext;
-               ResultRelInfo *resultRelInfo;
-
-               /*
-                * We set QueryDesc.tupDesc to be the RETURNING rowtype in this case.
-                * We assume all the sublists will generate the same output tupdesc.
-                */
-               tupType = ExecTypeFromTL((List *) linitial(plannedstmt->returningLists),
-                                                                false);
-
-               /* Set up a slot for the output of the RETURNING projection(s) */
-               slot = ExecAllocTableSlot(estate->es_tupleTable);
-               ExecSetSlotDescriptor(slot, tupType);
-               /* Need an econtext too */
-               econtext = CreateExprContext(estate);
+                       JunkFilter *j;
 
-               /*
-                * Build a projection for each result rel.      Note that any SubPlans in
-                * the RETURNING lists get attached to the topmost plan node.
-                */
-               Assert(list_length(plannedstmt->returningLists) == estate->es_num_result_relations);
-               resultRelInfo = estate->es_result_relations;
-               foreach(l, plannedstmt->returningLists)
-               {
-                       List       *rlist = (List *) lfirst(l);
-                       List       *rliststate;
+                       j = ExecInitJunkFilter(planstate->plan->targetlist,
+                                                                  tupType->tdhasoid,
+                                                                  ExecInitExtraTupleSlot(estate));
+                       estate->es_junkFilter = j;
 
-                       rliststate = (List *) ExecInitExpr((Expr *) rlist, planstate);
-                       resultRelInfo->ri_projectReturning =
-                               ExecBuildProjectionInfo(rliststate, econtext, slot,
-                                                                        resultRelInfo->ri_RelationDesc->rd_att);
-                       resultRelInfo++;
+                       /* Want to return the cleaned tuple type */
+                       tupType = j->jf_cleanTupType;
                }
        }
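
With RETURNING and DML handling moved into the ModifyTable node, only the SELECT path still builds a junk filter here. For reference, a sketch of how a junk filter is typically consumed once built; the per-tuple loop is hypothetical, the real consumers live elsewhere in the executor:

/* Hypothetical consumer of the filter built above (sketch only). */
TupleTableSlot *slot = ExecProcNode(planstate);

if (!TupIsNull(slot) && estate->es_junkFilter)
{
    AttrNumber  junkAttNo;
    bool        isNull;

    /* a junk attribute such as "ctid" can be fetched before filtering ... */
    junkAttNo = ExecFindJunkAttribute(estate->es_junkFilter, "ctid");
    if (AttributeNumberIsValid(junkAttNo))
        (void) ExecGetJunkAttribute(slot, junkAttNo, &isNull);

    /* ... and then the junk columns are projected away for output */
    slot = ExecFilterJunk(estate->es_junkFilter, slot);
}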
 
@@ -870,20 +970,21 @@ InitPlan(QueryDesc *queryDesc, int eflags)
 }
 
 /*
- * Initialize ResultRelInfo data for one result relation
+ * Check that a proposed result relation is a legal target for the operation
+ *
+ * In most cases parser and/or planner should have noticed this already, but
+ * let's make sure.  In the view case we do need a test here, because if the
+ * view wasn't rewritten by a rule, it had better have an INSTEAD trigger.
+ *
+ * Note: when changing this function, you probably also need to look at
+ * CheckValidRowMarkRel.
  */
 void
-InitResultRelInfo(ResultRelInfo *resultRelInfo,
-                                 Relation resultRelationDesc,
-                                 Index resultRelationIndex,
-                                 CmdType operation,
-                                 bool doInstrument)
+CheckValidResultRel(Relation resultRel, CmdType operation)
 {
-       /*
-        * Check valid relkind ... parser and/or planner should have noticed this
-        * already, but let's make sure.
-        */
-       switch (resultRelationDesc->rd_rel->relkind)
+       TriggerDesc *trigDesc = resultRel->trigdesc;
+
+       switch (resultRel->rd_rel->relkind)
        {
                case RELKIND_RELATION:
                        /* OK */
@@ -892,67 +993,154 @@ InitResultRelInfo(ResultRelInfo *resultRelInfo,
                        ereport(ERROR,
                                        (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                                         errmsg("cannot change sequence \"%s\"",
-                                                       RelationGetRelationName(resultRelationDesc))));
+                                                       RelationGetRelationName(resultRel))));
                        break;
                case RELKIND_TOASTVALUE:
                        ereport(ERROR,
                                        (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                                         errmsg("cannot change TOAST relation \"%s\"",
-                                                       RelationGetRelationName(resultRelationDesc))));
+                                                       RelationGetRelationName(resultRel))));
                        break;
                case RELKIND_VIEW:
+                       switch (operation)
+                       {
+                               case CMD_INSERT:
+                                       if (!trigDesc || !trigDesc->trig_insert_instead_row)
+                                               ereport(ERROR,
+                                                 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+                                                  errmsg("cannot insert into view \"%s\"",
+                                                                 RelationGetRelationName(resultRel)),
+                                                  errhint("You need an unconditional ON INSERT DO INSTEAD rule or an INSTEAD OF INSERT trigger.")));
+                                       break;
+                               case CMD_UPDATE:
+                                       if (!trigDesc || !trigDesc->trig_update_instead_row)
+                                               ereport(ERROR,
+                                                 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+                                                  errmsg("cannot update view \"%s\"",
+                                                                 RelationGetRelationName(resultRel)),
+                                                  errhint("You need an unconditional ON UPDATE DO INSTEAD rule or an INSTEAD OF UPDATE trigger.")));
+                                       break;
+                               case CMD_DELETE:
+                                       if (!trigDesc || !trigDesc->trig_delete_instead_row)
+                                               ereport(ERROR,
+                                                 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+                                                  errmsg("cannot delete from view \"%s\"",
+                                                                 RelationGetRelationName(resultRel)),
+                                                  errhint("You need an unconditional ON DELETE DO INSTEAD rule or an INSTEAD OF DELETE trigger.")));
+                                       break;
+                               default:
+                                       elog(ERROR, "unrecognized CmdType: %d", (int) operation);
+                                       break;
+                       }
+                       break;
+               case RELKIND_FOREIGN_TABLE:
                        ereport(ERROR,
                                        (errcode(ERRCODE_WRONG_OBJECT_TYPE),
-                                        errmsg("cannot change view \"%s\"",
-                                                       RelationGetRelationName(resultRelationDesc))));
+                                        errmsg("cannot change foreign table \"%s\"",
+                                                       RelationGetRelationName(resultRel))));
                        break;
                default:
                        ereport(ERROR,
                                        (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                                         errmsg("cannot change relation \"%s\"",
-                                                       RelationGetRelationName(resultRelationDesc))));
+                                                       RelationGetRelationName(resultRel))));
                        break;
        }
+}
 
-       /* OK, fill in the node */
-       MemSet(resultRelInfo, 0, sizeof(ResultRelInfo));
-       resultRelInfo->type = T_ResultRelInfo;
-       resultRelInfo->ri_RangeTableIndex = resultRelationIndex;
-       resultRelInfo->ri_RelationDesc = resultRelationDesc;
-       resultRelInfo->ri_NumIndices = 0;
-       resultRelInfo->ri_IndexRelationDescs = NULL;
-       resultRelInfo->ri_IndexRelationInfo = NULL;
-       /* make a copy so as not to depend on relcache info not changing... */
-       resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc);
-       if (resultRelInfo->ri_TrigDesc)
-       {
-               int                     n = resultRelInfo->ri_TrigDesc->numtriggers;
-
-               resultRelInfo->ri_TrigFunctions = (FmgrInfo *)
-                       palloc0(n * sizeof(FmgrInfo));
-               if (doInstrument)
-                       resultRelInfo->ri_TrigInstrument = InstrAlloc(n);
-               else
-                       resultRelInfo->ri_TrigInstrument = NULL;
-       }
-       else
+/*
+ * Check that a proposed rowmark target relation is a legal target
+ *
+ * In most cases parser and/or planner should have noticed this already, but
+ * they don't cover all cases.
+ */
+static void
+CheckValidRowMarkRel(Relation rel, RowMarkType markType)
+{
+       switch (rel->rd_rel->relkind)
+       {
+               case RELKIND_RELATION:
+                       /* OK */
+                       break;
+               case RELKIND_SEQUENCE:
+                       /* Must disallow this because we don't vacuum sequences */
+                       ereport(ERROR,
+                                       (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+                                        errmsg("cannot lock rows in sequence \"%s\"",
+                                                       RelationGetRelationName(rel))));
+                       break;
+               case RELKIND_TOASTVALUE:
+                       /* We could allow this, but there seems no good reason to */
+                       ereport(ERROR,
+                                       (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+                                        errmsg("cannot lock rows in TOAST relation \"%s\"",
+                                                       RelationGetRelationName(rel))));
+                       break;
+               case RELKIND_VIEW:
+                       /* Should not get here */
+                       ereport(ERROR,
+                                       (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+                                        errmsg("cannot lock rows in view \"%s\"",
+                                                       RelationGetRelationName(rel))));
+                       break;
+               case RELKIND_FOREIGN_TABLE:
+                       /* Perhaps we can support this someday, but not today */
+                       ereport(ERROR,
+                                       (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+                                        errmsg("cannot lock rows in foreign table \"%s\"",
+                                                       RelationGetRelationName(rel))));
+                       break;
+               default:
+                       ereport(ERROR,
+                                       (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+                                        errmsg("cannot lock rows in relation \"%s\"",
+                                                       RelationGetRelationName(rel))));
+                       break;
+       }
+}
+
+/*
+ * Initialize ResultRelInfo data for one result relation
+ *
+ * Caution: before Postgres 9.1, this function included the relkind checking
+ * that's now in CheckValidResultRel, and it also did ExecOpenIndices if
+ * appropriate.  Be sure callers cover those needs.
+ */
+void
+InitResultRelInfo(ResultRelInfo *resultRelInfo,
+                                 Relation resultRelationDesc,
+                                 Index resultRelationIndex,
+                                 int instrument_options)
+{
+       MemSet(resultRelInfo, 0, sizeof(ResultRelInfo));
+       resultRelInfo->type = T_ResultRelInfo;
+       resultRelInfo->ri_RangeTableIndex = resultRelationIndex;
+       resultRelInfo->ri_RelationDesc = resultRelationDesc;
+       resultRelInfo->ri_NumIndices = 0;
+       resultRelInfo->ri_IndexRelationDescs = NULL;
+       resultRelInfo->ri_IndexRelationInfo = NULL;
+       /* make a copy so as not to depend on relcache info not changing... */
+       resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc);
+       if (resultRelInfo->ri_TrigDesc)
+       {
+               int                     n = resultRelInfo->ri_TrigDesc->numtriggers;
+
+               resultRelInfo->ri_TrigFunctions = (FmgrInfo *)
+                       palloc0(n * sizeof(FmgrInfo));
+               resultRelInfo->ri_TrigWhenExprs = (List **)
+                       palloc0(n * sizeof(List *));
+               if (instrument_options)
+                       resultRelInfo->ri_TrigInstrument = InstrAlloc(n, instrument_options);
+       }
+       else
        {
                resultRelInfo->ri_TrigFunctions = NULL;
+               resultRelInfo->ri_TrigWhenExprs = NULL;
                resultRelInfo->ri_TrigInstrument = NULL;
        }
        resultRelInfo->ri_ConstraintExprs = NULL;
        resultRelInfo->ri_junkFilter = NULL;
        resultRelInfo->ri_projectReturning = NULL;
-
-       /*
-        * If there are indices on the result relation, open them and save
-        * descriptors in the result relation info, so that we can add new index
-        * entries for the tuples we add/update.  We need not do this for a
-        * DELETE, however, since deletion doesn't affect indexes.
-        */
-       if (resultRelationDesc->rd_rel->relhasindex &&
-               operation != CMD_DELETE)
-               ExecOpenIndices(resultRelInfo);
 }
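Because the relkind check and index opening were split out of this function (see the caution above), each caller now has to cover them itself. A hedged sketch of the expected call sequence; the local names are illustrative and the pattern only approximates what the executor's setup code does:

    Relation   rel = heap_open(resultRelationOid, RowExclusiveLock);

    /* formerly done inside InitResultRelInfo */
    CheckValidResultRel(rel, operation);

    InitResultRelInfo(resultRelInfo,
                      rel,
                      resultRelationIndex,
                      estate->es_instrument);

    /* open index descriptors only when tuples will be inserted or updated */
    if (rel->rd_rel->relhasindex && operation != CMD_DELETE)
        ExecOpenIndices(resultRelInfo);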
 
 /*
@@ -1002,26 +1190,29 @@ ExecGetTriggerResultRel(EState *estate, Oid relid)
        /*
         * Open the target relation's relcache entry.  We assume that an
         * appropriate lock is still held by the backend from whenever the trigger
-        * event got queued, so we need take no new lock here.
+        * event got queued, so we need take no new lock here.  Also, we need not
+        * recheck the relkind, so no need for CheckValidResultRel.
         */
        rel = heap_open(relid, NoLock);
 
        /*
-        * Make the new entry in the right context.  Currently, we don't need any
-        * index information in ResultRelInfos used only for triggers, so tell
-        * InitResultRelInfo it's a DELETE.
+        * Make the new entry in the right context.
         */
        oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
        rInfo = makeNode(ResultRelInfo);
        InitResultRelInfo(rInfo,
                                          rel,
                                          0,            /* dummy rangetable index */
-                                         CMD_DELETE,
                                          estate->es_instrument);
        estate->es_trig_target_relations =
                lappend(estate->es_trig_target_relations, rInfo);
        MemoryContextSwitchTo(oldcontext);
 
+       /*
+        * Currently, we don't need any index information in ResultRelInfos used
+        * only for triggers, so no need to call ExecOpenIndices.
+        */
+
        return rInfo;
 }
 
@@ -1045,11 +1236,13 @@ ExecGetTriggerResultRel(EState *estate, Oid relid)
  * recognize how far down the requirement really goes, but for now we just
  * make all plan nodes do the same thing if the top level forces the choice.
  *
- * We assume that estate->es_result_relation_info is already set up to
- * describe the target relation.  Note that in an UPDATE that spans an
- * inheritance tree, some of the target relations may have OIDs and some not.
- * We have to make the decisions on a per-relation basis as we initialize
- * each of the child plans of the topmost Append plan.
+ * We assume that if we are generating tuples for INSERT or UPDATE,
+ * estate->es_result_relation_info is already set up to describe the target
+ * relation.  Note that in an UPDATE that spans an inheritance tree, some of
+ * the target relations may have OIDs and some not.  We have to make the
+ * decisions on a per-relation basis as we initialize each of the subplans of
+ * the ModifyTable node, so ModifyTable has to set es_result_relation_info
+ * while initializing each subplan.
  *
  * SELECT INTO is even uglier, because we don't have the INTO relation's
  * descriptor available when this code runs; we have to look aside at a
@@ -1058,28 +1251,66 @@ ExecGetTriggerResultRel(EState *estate, Oid relid)
 bool
 ExecContextForcesOids(PlanState *planstate, bool *hasoids)
 {
+       ResultRelInfo *ri = planstate->state->es_result_relation_info;
+
+       if (ri != NULL)
+       {
+               Relation        rel = ri->ri_RelationDesc;
+
+               if (rel != NULL)
+               {
+                       *hasoids = rel->rd_rel->relhasoids;
+                       return true;
+               }
+       }
+
        if (planstate->state->es_select_into)
        {
                *hasoids = planstate->state->es_into_oids;
                return true;
        }
-       else
+
+       return false;
+}
+
+/* ----------------------------------------------------------------
+ *             ExecPostprocessPlan
+ *
+ *             Give plan nodes a final chance to execute before shutdown
+ * ----------------------------------------------------------------
+ */
+static void
+ExecPostprocessPlan(EState *estate)
+{
+       ListCell   *lc;
+
+       /*
+        * Make sure nodes run forward.
+        */
+       estate->es_direction = ForwardScanDirection;
+
+       /*
+        * Run any secondary ModifyTable nodes to completion, in case the main
+        * query did not fetch all rows from them.      (We do this to ensure that
+        * such nodes have predictable results.)
+        */
+       foreach(lc, estate->es_auxmodifytables)
        {
-               ResultRelInfo *ri = planstate->state->es_result_relation_info;
+               PlanState  *ps = (PlanState *) lfirst(lc);
 
-               if (ri != NULL)
+               for (;;)
                {
-                       Relation        rel = ri->ri_RelationDesc;
+                       TupleTableSlot *slot;
 
-                       if (rel != NULL)
-                       {
-                               *hasoids = rel->rd_rel->relhasoids;
-                               return true;
-                       }
+                       /* Reset the per-output-tuple exprcontext each time */
+                       ResetPerTupleExprContext(estate);
+
+                       slot = ExecProcNode(ps);
+
+                       if (TupIsNull(slot))
+                               break;
                }
        }
-
-       return false;
 }
 
 /* ----------------------------------------------------------------
@@ -1102,12 +1333,6 @@ ExecEndPlan(PlanState *planstate, EState *estate)
        ListCell   *l;
 
        /*
-        * shut down any PlanQual processing we were doing
-        */
-       if (estate->es_evalPlanQual != NULL)
-               EndEvalPlanQual(estate);
-
-       /*
         * shut down the node-type-specific query processing
         */
        ExecEndNode(planstate);
@@ -1123,10 +1348,12 @@ ExecEndPlan(PlanState *planstate, EState *estate)
        }
 
        /*
-        * destroy the executor "tuple" table.
+        * destroy the executor's tuple table.  Actually we only care about
+        * releasing buffer pins and tupdesc refcounts; there's no need to pfree
+        * the TupleTableSlots, since the containing memory context is about to go
+        * away anyway.
         */
-       ExecDropTupleTable(estate->es_tupleTable, true);
-       estate->es_tupleTable = NULL;
+       ExecResetTupleTable(estate->es_tupleTable, false);
 
        /*
         * close the result relation(s) if any, but hold locks until xact commit.
@@ -1156,48 +1383,41 @@ ExecEndPlan(PlanState *planstate, EState *estate)
         */
        foreach(l, estate->es_rowMarks)
        {
-               ExecRowMark *erm = lfirst(l);
+               ExecRowMark *erm = (ExecRowMark *) lfirst(l);
 
-               heap_close(erm->relation, NoLock);
+               if (erm->relation)
+                       heap_close(erm->relation, NoLock);
        }
 }
 
 /* ----------------------------------------------------------------
  *             ExecutePlan
  *
- *             processes the query plan to retrieve 'numberTuples' tuples in the
- *             direction specified.
+ *             Processes the query plan until we have processed 'numberTuples' tuples,
+ *             moving in the specified direction.
  *
- *             Retrieves all tuples if numberTuples is 0
- *
- *             result is either a slot containing the last tuple in the case
- *             of a SELECT or NULL otherwise.
+ *             Runs to completion if numberTuples is 0
  *
  * Note: the ctid attribute is a 'junk' attribute that is removed before the
  * user can see it
  * ----------------------------------------------------------------
  */
-static TupleTableSlot *
+static void
 ExecutePlan(EState *estate,
                        PlanState *planstate,
                        CmdType operation,
+                       bool sendTuples,
                        long numberTuples,
                        ScanDirection direction,
                        DestReceiver *dest)
 {
-       JunkFilter *junkfilter;
-       TupleTableSlot *planSlot;
        TupleTableSlot *slot;
-       ItemPointer tupleid = NULL;
-       ItemPointerData tuple_ctid;
        long            current_tuple_count;
-       TupleTableSlot *result;
 
        /*
         * initialize local variables
         */
        current_tuple_count = 0;
-       result = NULL;
 
        /*
         * Set the direction.
@@ -1205,28 +1425,8 @@ ExecutePlan(EState *estate,
        estate->es_direction = direction;
 
        /*
-        * Process BEFORE EACH STATEMENT triggers
-        */
-       switch (operation)
-       {
-               case CMD_UPDATE:
-                       ExecBSUpdateTriggers(estate, estate->es_result_relation_info);
-                       break;
-               case CMD_DELETE:
-                       ExecBSDeleteTriggers(estate, estate->es_result_relation_info);
-                       break;
-               case CMD_INSERT:
-                       ExecBSInsertTriggers(estate, estate->es_result_relation_info);
-                       break;
-               default:
-                       /* do nothing */
-                       break;
-       }
-
-       /*
         * Loop until we've processed the proper number of tuples from the plan.
         */
-
        for (;;)
        {
                /* Reset the per-output-tuple exprcontext */
@@ -1235,26 +1435,14 @@ ExecutePlan(EState *estate,
                /*
                 * Execute the plan and obtain a tuple
                 */
-lnext: ;
-               if (estate->es_useEvalPlan)
-               {
-                       planSlot = EvalPlanQualNext(estate);
-                       if (TupIsNull(planSlot))
-                               planSlot = ExecProcNode(planstate);
-               }
-               else
-                       planSlot = ExecProcNode(planstate);
+               slot = ExecProcNode(planstate);
 
                /*
                 * if the tuple is null, then we assume there is nothing more to
-                * process so we just return null...
+                * process so we just end the loop...
                 */
-               if (TupIsNull(planSlot))
-               {
-                       result = NULL;
+               if (TupIsNull(slot))
                        break;
-               }
-               slot = planSlot;
 
                /*
                 * If we have a junk filter, then project a new tuple with the junk
@@ -1263,156 +1451,24 @@ lnext: ;
                 * Store this new "clean" tuple in the junkfilter's resultSlot.
                 * (Formerly, we stored it back over the "dirty" tuple, which is WRONG
                 * because that tuple slot has the wrong descriptor.)
-                *
-                * But first, extract all the junk information we need.
                 */
-               if ((junkfilter = estate->es_junkFilter) != NULL)
-               {
-                       /*
-                        * Process any FOR UPDATE or FOR SHARE locking requested.
-                        */
-                       if (estate->es_rowMarks != NIL)
-                       {
-                               ListCell   *l;
-
-               lmark:  ;
-                               foreach(l, estate->es_rowMarks)
-                               {
-                                       ExecRowMark *erm = lfirst(l);
-                                       Datum           datum;
-                                       bool            isNull;
-                                       HeapTupleData tuple;
-                                       Buffer          buffer;
-                                       ItemPointerData update_ctid;
-                                       TransactionId update_xmax;
-                                       TupleTableSlot *newSlot;
-                                       LockTupleMode lockmode;
-                                       HTSU_Result test;
-
-                                       datum = ExecGetJunkAttribute(slot,
-                                                                                                erm->ctidAttNo,
-                                                                                                &isNull);
-                                       /* shouldn't ever get a null result... */
-                                       if (isNull)
-                                               elog(ERROR, "ctid is NULL");
-
-                                       tuple.t_self = *((ItemPointer) DatumGetPointer(datum));
-
-                                       if (erm->forUpdate)
-                                               lockmode = LockTupleExclusive;
-                                       else
-                                               lockmode = LockTupleShared;
-
-                                       test = heap_lock_tuple(erm->relation, &tuple, &buffer,
-                                                                                  &update_ctid, &update_xmax,
-                                                                                  estate->es_output_cid,
-                                                                                  lockmode, erm->noWait);
-                                       ReleaseBuffer(buffer);
-                                       switch (test)
-                                       {
-                                               case HeapTupleSelfUpdated:
-                                                       /* treat it as deleted; do not process */
-                                                       goto lnext;
-
-                                               case HeapTupleMayBeUpdated:
-                                                       break;
-
-                                               case HeapTupleUpdated:
-                                                       if (IsXactIsoLevelSerializable)
-                                                               ereport(ERROR,
-                                                                (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
-                                                                 errmsg("could not serialize access due to concurrent update")));
-                                                       if (!ItemPointerEquals(&update_ctid,
-                                                                                                  &tuple.t_self))
-                                                       {
-                                                               /* updated, so look at updated version */
-                                                               newSlot = EvalPlanQual(estate,
-                                                                                                          erm->rti,
-                                                                                                          &update_ctid,
-                                                                                                          update_xmax);
-                                                               if (!TupIsNull(newSlot))
-                                                               {
-                                                                       slot = planSlot = newSlot;
-                                                                       estate->es_useEvalPlan = true;
-                                                                       goto lmark;
-                                                               }
-                                                       }
-
-                                                       /*
-                                                        * if tuple was deleted or PlanQual failed for
-                                                        * updated tuple - we must not return this tuple!
-                                                        */
-                                                       goto lnext;
-
-                                               default:
-                                                       elog(ERROR, "unrecognized heap_lock_tuple status: %u",
-                                                                test);
-                                                       return NULL;
-                                       }
-                               }
-                       }
-
-                       /*
-                        * extract the 'ctid' junk attribute.
-                        */
-                       if (operation == CMD_UPDATE || operation == CMD_DELETE)
-                       {
-                               Datum           datum;
-                               bool            isNull;
-
-                               datum = ExecGetJunkAttribute(slot, junkfilter->jf_junkAttNo,
-                                                                                        &isNull);
-                               /* shouldn't ever get a null result... */
-                               if (isNull)
-                                       elog(ERROR, "ctid is NULL");
-
-                               tupleid = (ItemPointer) DatumGetPointer(datum);
-                               tuple_ctid = *tupleid;  /* make sure we don't free the ctid!! */
-                               tupleid = &tuple_ctid;
-                       }
-
-                       /*
-                        * Create a new "clean" tuple with all junk attributes removed. We
-                        * don't need to do this for DELETE, however (there will in fact
-                        * be no non-junk attributes in a DELETE!)
-                        */
-                       if (operation != CMD_DELETE)
-                               slot = ExecFilterJunk(junkfilter, slot);
-               }
+               if (estate->es_junkFilter != NULL)
+                       slot = ExecFilterJunk(estate->es_junkFilter, slot);
 
                /*
-                * now that we have a tuple, do the appropriate thing with it.. either
-                * return it to the user, add it to a relation someplace, delete it
-                * from a relation, or modify some of its attributes.
+                * If we are supposed to send the tuple somewhere, do so. (In
+                * practice, this is probably always the case at this point.)
                 */
-               switch (operation)
-               {
-                       case CMD_SELECT:
-                               ExecSelect(slot, dest, estate);
-                               result = slot;
-                               break;
-
-                       case CMD_INSERT:
-                               ExecInsert(slot, tupleid, planSlot, dest, estate);
-                               result = NULL;
-                               break;
-
-                       case CMD_DELETE:
-                               ExecDelete(tupleid, planSlot, dest, estate);
-                               result = NULL;
-                               break;
+               if (sendTuples)
+                       (*dest->receiveSlot) (slot, dest);
 
-                       case CMD_UPDATE:
-                               ExecUpdate(slot, tupleid, planSlot, dest, estate);
-                               result = NULL;
-                               break;
-
-                       default:
-                               elog(ERROR, "unrecognized operation code: %d",
-                                        (int) operation);
-                               result = NULL;
-                               break;
-               }
+               /*
+                * Count tuples processed, if this is a SELECT.  (For other operation
+                * types, the ModifyTable plan node must count the appropriate
+                * events.)
+                */
+               if (operation == CMD_SELECT)
+                       (estate->es_processed)++;
 
                /*
                 * check our tuple count.. if we've processed the proper number then
@@ -1423,444 +1479,8 @@ lnext:  ;
                if (numberTuples && numberTuples == current_tuple_count)
                        break;
        }
-
-       /*
-        * Process AFTER EACH STATEMENT triggers
-        */
-       switch (operation)
-       {
-               case CMD_UPDATE:
-                       ExecASUpdateTriggers(estate, estate->es_result_relation_info);
-                       break;
-               case CMD_DELETE:
-                       ExecASDeleteTriggers(estate, estate->es_result_relation_info);
-                       break;
-               case CMD_INSERT:
-                       ExecASInsertTriggers(estate, estate->es_result_relation_info);
-                       break;
-               default:
-                       /* do nothing */
-                       break;
-       }
-
-       /*
-        * here, result is either a slot containing a tuple in the case of a
-        * SELECT or NULL otherwise.
-        */
-       return result;
 }
 
-/* ----------------------------------------------------------------
- *             ExecSelect
- *
- *             SELECTs are easy.. we just pass the tuple to the appropriate
- *             output function.
- * ----------------------------------------------------------------
- */
-static void
-ExecSelect(TupleTableSlot *slot,
-                  DestReceiver *dest,
-                  EState *estate)
-{
-       (*dest->receiveSlot) (slot, dest);
-       IncrRetrieved();
-       (estate->es_processed)++;
-}
-
-/* ----------------------------------------------------------------
- *             ExecInsert
- *
- *             INSERTs are trickier.. we have to insert the tuple into
- *             the base relation and insert appropriate tuples into the
- *             index relations.
- * ----------------------------------------------------------------
- */
-static void
-ExecInsert(TupleTableSlot *slot,
-                  ItemPointer tupleid,
-                  TupleTableSlot *planSlot,
-                  DestReceiver *dest,
-                  EState *estate)
-{
-       HeapTuple       tuple;
-       ResultRelInfo *resultRelInfo;
-       Relation        resultRelationDesc;
-       Oid                     newId;
-
-       /*
-        * get the heap tuple out of the tuple table slot, making sure we have a
-        * writable copy
-        */
-       tuple = ExecMaterializeSlot(slot);
-
-       /*
-        * get information on the (current) result relation
-        */
-       resultRelInfo = estate->es_result_relation_info;
-       resultRelationDesc = resultRelInfo->ri_RelationDesc;
-
-       /* BEFORE ROW INSERT Triggers */
-       if (resultRelInfo->ri_TrigDesc &&
-               resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_INSERT] > 0)
-       {
-               HeapTuple       newtuple;
-
-               newtuple = ExecBRInsertTriggers(estate, resultRelInfo, tuple);
-
-               if (newtuple == NULL)   /* "do nothing" */
-                       return;
-
-               if (newtuple != tuple)  /* modified by Trigger(s) */
-               {
-                       /*
-                        * Put the modified tuple into a slot for convenience of routines
-                        * below.  We assume the tuple was allocated in per-tuple memory
-                        * context, and therefore will go away by itself. The tuple table
-                        * slot should not try to clear it.
-                        */
-                       TupleTableSlot *newslot = estate->es_trig_tuple_slot;
-
-                       if (newslot->tts_tupleDescriptor != slot->tts_tupleDescriptor)
-                               ExecSetSlotDescriptor(newslot, slot->tts_tupleDescriptor);
-                       ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
-                       slot = newslot;
-                       tuple = newtuple;
-               }
-       }
-
-       /*
-        * Check the constraints of the tuple
-        */
-       if (resultRelationDesc->rd_att->constr)
-               ExecConstraints(resultRelInfo, slot, estate);
-
-       /*
-        * insert the tuple
-        *
-        * Note: heap_insert returns the tid (location) of the new tuple in the
-        * t_self field.
-        */
-       newId = heap_insert(resultRelationDesc, tuple,
-                                               estate->es_output_cid,
-                                               true, true);
-
-       IncrAppended();
-       (estate->es_processed)++;
-       estate->es_lastoid = newId;
-       setLastTid(&(tuple->t_self));
-
-       /*
-        * insert index entries for tuple
-        */
-       if (resultRelInfo->ri_NumIndices > 0)
-               ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);
-
-       /* AFTER ROW INSERT Triggers */
-       ExecARInsertTriggers(estate, resultRelInfo, tuple);
-
-       /* Process RETURNING if present */
-       if (resultRelInfo->ri_projectReturning)
-               ExecProcessReturning(resultRelInfo->ri_projectReturning,
-                                                        slot, planSlot, dest);
-}
-
-/* ----------------------------------------------------------------
- *             ExecDelete
- *
- *             DELETE is like UPDATE, except that we delete the tuple and no
- *             index modifications are needed
- * ----------------------------------------------------------------
- */
-static void
-ExecDelete(ItemPointer tupleid,
-                  TupleTableSlot *planSlot,
-                  DestReceiver *dest,
-                  EState *estate)
-{
-       ResultRelInfo *resultRelInfo;
-       Relation        resultRelationDesc;
-       HTSU_Result result;
-       ItemPointerData update_ctid;
-       TransactionId update_xmax;
-
-       /*
-        * get information on the (current) result relation
-        */
-       resultRelInfo = estate->es_result_relation_info;
-       resultRelationDesc = resultRelInfo->ri_RelationDesc;
-
-       /* BEFORE ROW DELETE Triggers */
-       if (resultRelInfo->ri_TrigDesc &&
-               resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_DELETE] > 0)
-       {
-               bool            dodelete;
-
-               dodelete = ExecBRDeleteTriggers(estate, resultRelInfo, tupleid);
-
-               if (!dodelete)                  /* "do nothing" */
-                       return;
-       }
-
-       /*
-        * delete the tuple
-        *
-        * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
-        * the row to be deleted is visible to that snapshot, and throw a can't-
-        * serialize error if not.      This is a special-case behavior needed for
-        * referential integrity updates in serializable transactions.
-        */
-ldelete:;
-       result = heap_delete(resultRelationDesc, tupleid,
-                                                &update_ctid, &update_xmax,
-                                                estate->es_output_cid,
-                                                estate->es_crosscheck_snapshot,
-                                                true /* wait for commit */ );
-       switch (result)
-       {
-               case HeapTupleSelfUpdated:
-                       /* already deleted by self; nothing to do */
-                       return;
-
-               case HeapTupleMayBeUpdated:
-                       break;
-
-               case HeapTupleUpdated:
-                       if (IsXactIsoLevelSerializable)
-                               ereport(ERROR,
-                                               (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
-                                                errmsg("could not serialize access due to concurrent update")));
-                       else if (!ItemPointerEquals(tupleid, &update_ctid))
-                       {
-                               TupleTableSlot *epqslot;
-
-                               epqslot = EvalPlanQual(estate,
-                                                                          resultRelInfo->ri_RangeTableIndex,
-                                                                          &update_ctid,
-                                                                          update_xmax);
-                               if (!TupIsNull(epqslot))
-                               {
-                                       *tupleid = update_ctid;
-                                       goto ldelete;
-                               }
-                       }
-                       /* tuple already deleted; nothing to do */
-                       return;
-
-               default:
-                       elog(ERROR, "unrecognized heap_delete status: %u", result);
-                       return;
-       }
-
-       IncrDeleted();
-       (estate->es_processed)++;
-
-       /*
-        * Note: Normally one would think that we have to delete index tuples
-        * associated with the heap tuple now...
-        *
-        * ... but in POSTGRES, we have no need to do this because VACUUM will
-        * take care of it later.  We can't delete index tuples immediately
-        * anyway, since the tuple is still visible to other transactions.
-        */
-
-       /* AFTER ROW DELETE Triggers */
-       ExecARDeleteTriggers(estate, resultRelInfo, tupleid);
-
-       /* Process RETURNING if present */
-       if (resultRelInfo->ri_projectReturning)
-       {
-               /*
-                * We have to put the target tuple into a slot, which means first we
-                * gotta fetch it.      We can use the trigger tuple slot.
-                */
-               TupleTableSlot *slot = estate->es_trig_tuple_slot;
-               HeapTupleData deltuple;
-               Buffer          delbuffer;
-
-               deltuple.t_self = *tupleid;
-               if (!heap_fetch(resultRelationDesc, SnapshotAny,
-                                               &deltuple, &delbuffer, false, NULL))
-                       elog(ERROR, "failed to fetch deleted tuple for DELETE RETURNING");
-
-               if (slot->tts_tupleDescriptor != RelationGetDescr(resultRelationDesc))
-                       ExecSetSlotDescriptor(slot, RelationGetDescr(resultRelationDesc));
-               ExecStoreTuple(&deltuple, slot, InvalidBuffer, false);
-
-               ExecProcessReturning(resultRelInfo->ri_projectReturning,
-                                                        slot, planSlot, dest);
-
-               ExecClearTuple(slot);
-               ReleaseBuffer(delbuffer);
-       }
-}
-
-/* ----------------------------------------------------------------
- *             ExecUpdate
- *
- *             note: we can't run UPDATE queries with transactions
- *             off because UPDATEs are actually INSERTs and our
- *             scan will mistakenly loop forever, updating the tuple
- *             it just inserted..      This should be fixed but until it
- *             is, we don't want to get stuck in an infinite loop
- *             which corrupts your database..
- * ----------------------------------------------------------------
- */
-static void
-ExecUpdate(TupleTableSlot *slot,
-                  ItemPointer tupleid,
-                  TupleTableSlot *planSlot,
-                  DestReceiver *dest,
-                  EState *estate)
-{
-       HeapTuple       tuple;
-       ResultRelInfo *resultRelInfo;
-       Relation        resultRelationDesc;
-       HTSU_Result result;
-       ItemPointerData update_ctid;
-       TransactionId update_xmax;
-
-       /*
-        * abort the operation if not running transactions
-        */
-       if (IsBootstrapProcessingMode())
-               elog(ERROR, "cannot UPDATE during bootstrap");
-
-       /*
-        * get the heap tuple out of the tuple table slot, making sure we have a
-        * writable copy
-        */
-       tuple = ExecMaterializeSlot(slot);
-
-       /*
-        * get information on the (current) result relation
-        */
-       resultRelInfo = estate->es_result_relation_info;
-       resultRelationDesc = resultRelInfo->ri_RelationDesc;
-
-       /* BEFORE ROW UPDATE Triggers */
-       if (resultRelInfo->ri_TrigDesc &&
-               resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_UPDATE] > 0)
-       {
-               HeapTuple       newtuple;
-
-               newtuple = ExecBRUpdateTriggers(estate, resultRelInfo,
-                                                                               tupleid, tuple);
-
-               if (newtuple == NULL)   /* "do nothing" */
-                       return;
-
-               if (newtuple != tuple)  /* modified by Trigger(s) */
-               {
-                       /*
-                        * Put the modified tuple into a slot for convenience of routines
-                        * below.  We assume the tuple was allocated in per-tuple memory
-                        * context, and therefore will go away by itself. The tuple table
-                        * slot should not try to clear it.
-                        */
-                       TupleTableSlot *newslot = estate->es_trig_tuple_slot;
-
-                       if (newslot->tts_tupleDescriptor != slot->tts_tupleDescriptor)
-                               ExecSetSlotDescriptor(newslot, slot->tts_tupleDescriptor);
-                       ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
-                       slot = newslot;
-                       tuple = newtuple;
-               }
-       }
-
-       /*
-        * Check the constraints of the tuple
-        *
-        * If we generate a new candidate tuple after EvalPlanQual testing, we
-        * must loop back here and recheck constraints.  (We don't need to redo
-        * triggers, however.  If there are any BEFORE triggers then trigger.c
-        * will have done heap_lock_tuple to lock the correct tuple, so there's no
-        * need to do them again.)
-        */
-lreplace:;
-       if (resultRelationDesc->rd_att->constr)
-               ExecConstraints(resultRelInfo, slot, estate);
-
-       /*
-        * replace the heap tuple
-        *
-        * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
-        * the row to be updated is visible to that snapshot, and throw a can't-
-        * serialize error if not.      This is a special-case behavior needed for
-        * referential integrity updates in serializable transactions.
-        */
-       result = heap_update(resultRelationDesc, tupleid, tuple,
-                                                &update_ctid, &update_xmax,
-                                                estate->es_output_cid,
-                                                estate->es_crosscheck_snapshot,
-                                                true /* wait for commit */ );
-       switch (result)
-       {
-               case HeapTupleSelfUpdated:
-                       /* already deleted by self; nothing to do */
-                       return;
-
-               case HeapTupleMayBeUpdated:
-                       break;
-
-               case HeapTupleUpdated:
-                       if (IsXactIsoLevelSerializable)
-                               ereport(ERROR,
-                                               (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
-                                                errmsg("could not serialize access due to concurrent update")));
-                       else if (!ItemPointerEquals(tupleid, &update_ctid))
-                       {
-                               TupleTableSlot *epqslot;
-
-                               epqslot = EvalPlanQual(estate,
-                                                                          resultRelInfo->ri_RangeTableIndex,
-                                                                          &update_ctid,
-                                                                          update_xmax);
-                               if (!TupIsNull(epqslot))
-                               {
-                                       *tupleid = update_ctid;
-                                       slot = ExecFilterJunk(estate->es_junkFilter, epqslot);
-                                       tuple = ExecMaterializeSlot(slot);
-                                       goto lreplace;
-                               }
-                       }
-                       /* tuple already deleted; nothing to do */
-                       return;
-
-               default:
-                       elog(ERROR, "unrecognized heap_update status: %u", result);
-                       return;
-       }
-
-       IncrReplaced();
-       (estate->es_processed)++;
-
-       /*
-        * Note: instead of having to update the old index tuples associated with
-        * the heap tuple, all we do is form and insert new index tuples. This is
-        * because UPDATEs are actually DELETEs and INSERTs, and index tuple
-        * deletion is done later by VACUUM (see notes in ExecDelete).  All we do
-        * here is insert new index tuples.  -cim 9/27/89
-        */
-
-       /*
-        * insert index entries for tuple
-        *
-        * Note: heap_update returns the tid (location) of the new tuple in the
-        * t_self field.
-        *
-        * If it's a HOT update, we mustn't insert new index entries.
-        */
-       if (resultRelInfo->ri_NumIndices > 0 && !HeapTupleIsHeapOnly(tuple))
-               ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);
-
-       /* AFTER ROW UPDATE Triggers */
-       ExecARUpdateTriggers(estate, resultRelInfo, tupleid, tuple);
-
-       /* Process RETURNING if present */
-       if (resultRelInfo->ri_projectReturning)
-               ExecProcessReturning(resultRelInfo->ri_projectReturning,
-                                                        slot, planSlot, dest);
-}
 
 /*
  * ExecRelCheck --- check that tuple meets constraints for result relation
@@ -1961,98 +1581,196 @@ ExecConstraints(ResultRelInfo *resultRelInfo,
        }
 }
 
-/*
- * ExecProcessReturning --- evaluate a RETURNING list and send to dest
- *
- * projectReturning: RETURNING projection info for current result rel
- * tupleSlot: slot holding tuple actually inserted/updated/deleted
- * planSlot: slot holding tuple returned by top plan node
- * dest: where to send the output
- */
-static void
-ExecProcessReturning(ProjectionInfo *projectReturning,
-                                        TupleTableSlot *tupleSlot,
-                                        TupleTableSlot *planSlot,
-                                        DestReceiver *dest)
-{
-       ExprContext *econtext = projectReturning->pi_exprContext;
-       TupleTableSlot *retSlot;
+
+/*
+ * ExecFindRowMark -- find the ExecRowMark struct for given rangetable index
+ */
+ExecRowMark *
+ExecFindRowMark(EState *estate, Index rti)
+{
+       ListCell   *lc;
+
+       foreach(lc, estate->es_rowMarks)
+       {
+               ExecRowMark *erm = (ExecRowMark *) lfirst(lc);
+
+               if (erm->rti == rti)
+                       return erm;
+       }
+       elog(ERROR, "failed to find ExecRowMark for rangetable index %u", rti);
+       return NULL;                            /* keep compiler quiet */
+}
+
+/*
+ * ExecBuildAuxRowMark -- create an ExecAuxRowMark struct
+ *
+ * Inputs are the underlying ExecRowMark struct and the targetlist of the
+ * input plan node (not planstate node!).  We need the latter to find out
+ * the column numbers of the resjunk columns.
+ */
+ExecAuxRowMark *
+ExecBuildAuxRowMark(ExecRowMark *erm, List *targetlist)
+{
+       ExecAuxRowMark *aerm = (ExecAuxRowMark *) palloc0(sizeof(ExecAuxRowMark));
+       char            resname[32];
+
+       aerm->rowmark = erm;
+
+       /* Look up the resjunk columns associated with this rowmark */
+       if (erm->relation)
+       {
+               Assert(erm->markType != ROW_MARK_COPY);
+
+               /* if child rel, need tableoid */
+               if (erm->rti != erm->prti)
+               {
+                       snprintf(resname, sizeof(resname), "tableoid%u", erm->rowmarkId);
+                       aerm->toidAttNo = ExecFindJunkAttributeInTlist(targetlist,
+                                                                                                                  resname);
+                       if (!AttributeNumberIsValid(aerm->toidAttNo))
+                               elog(ERROR, "could not find junk %s column", resname);
+               }
+
+               /* always need ctid for real relations */
+               snprintf(resname, sizeof(resname), "ctid%u", erm->rowmarkId);
+               aerm->ctidAttNo = ExecFindJunkAttributeInTlist(targetlist,
+                                                                                                          resname);
+               if (!AttributeNumberIsValid(aerm->ctidAttNo))
+                       elog(ERROR, "could not find junk %s column", resname);
+       }
+       else
+       {
+               Assert(erm->markType == ROW_MARK_COPY);
+
+               snprintf(resname, sizeof(resname), "wholerow%u", erm->rowmarkId);
+               aerm->wholeAttNo = ExecFindJunkAttributeInTlist(targetlist,
+                                                                                                               resname);
+               if (!AttributeNumberIsValid(aerm->wholeAttNo))
+                       elog(ERROR, "could not find junk %s column", resname);
+       }
+
+       return aerm;
+}
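A condensed sketch of how a plan node is expected to use these helpers: at initialization it resolves each PlanRowMark into an ExecAuxRowMark against its subplan's targetlist, and at runtime it pulls the junk ctid back out of the subplan's output. The names rc, outerPlan, and slot are assumptions for illustration (compare nodeLockRows.c):

    /* at ExecInit time */
    ExecRowMark    *erm = ExecFindRowMark(estate, rc->rti);
    ExecAuxRowMark *aerm = ExecBuildAuxRowMark(erm, outerPlan->targetlist);

    /* at execution time, for each slot returned by the subplan */
    Datum           datum;
    bool            isNull;
    ItemPointerData tid;

    datum = ExecGetJunkAttribute(slot, aerm->ctidAttNo, &isNull);
    if (!isNull)
        tid = *((ItemPointer) DatumGetPointer(datum));   /* row to lock/recheck */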
+
+
+/*
+ * EvalPlanQual logic --- recheck modified tuple(s) to see if we want to
+ * process the updated version under READ COMMITTED rules.
+ *
+ * See backend/executor/README for some info about how this works.
+ */
+
+
+/*
+ * Check a modified tuple to see if we want to process its updated version
+ * under READ COMMITTED rules.
+ *
+ *     estate - outer executor state data
+ *     epqstate - state for EvalPlanQual rechecking
+ *     relation - table containing tuple
+ *     rti - rangetable index of table containing tuple
+ *     *tid - t_ctid from the outdated tuple (ie, next updated version)
+ *     priorXmax - t_xmax from the outdated tuple
+ *
+ * *tid is also an output parameter: it's modified to hold the TID of the
+ * latest version of the tuple (note this may be changed even on failure)
+ *
+ * Returns a slot containing the new candidate update/delete tuple, or
+ * NULL if we determine we shouldn't process the row.
+ */
+TupleTableSlot *
+EvalPlanQual(EState *estate, EPQState *epqstate,
+                        Relation relation, Index rti,
+                        ItemPointer tid, TransactionId priorXmax)
+{
+       TupleTableSlot *slot;
+       HeapTuple       copyTuple;
+
+       Assert(rti > 0);
+
+       /*
+        * Get and lock the updated version of the row; if fail, return NULL.
+        */
+       copyTuple = EvalPlanQualFetch(estate, relation, LockTupleExclusive,
+                                                                 tid, priorXmax);
+
+       if (copyTuple == NULL)
+               return NULL;
+
+       /*
+        * For UPDATE/DELETE we have to return tid of actual row we're executing
+        * PQ for.
+        */
+       *tid = copyTuple->t_self;
+
+       /*
+        * Need to run a recheck subquery.      Initialize or reinitialize EPQ state.
+        */
+       EvalPlanQualBegin(epqstate, estate);
+
+       /*
+        * Free old test tuple, if any, and store new tuple where relation's scan
+        * node will see it
+        */
+       EvalPlanQualSetTuple(epqstate, rti, copyTuple);
 
        /*
-        * Reset per-tuple memory context to free any expression evaluation
-        * storage allocated in the previous cycle.
+        * Fetch any non-locked source rows
         */
-       ResetExprContext(econtext);
+       EvalPlanQualFetchRowMarks(epqstate);
 
-       /* Make tuple and any needed join variables available to ExecProject */
-       econtext->ecxt_scantuple = tupleSlot;
-       econtext->ecxt_outertuple = planSlot;
+       /*
+        * Run the EPQ query.  We assume it will return at most one tuple.
+        */
+       slot = EvalPlanQualNext(epqstate);
 
-       /* Compute the RETURNING expressions */
-       retSlot = ExecProject(projectReturning, NULL);
+       /*
+        * If we got a tuple, force the slot to materialize the tuple so that it
+        * is not dependent on any local state in the EPQ query (in particular,
+        * it's highly likely that the slot contains references to any pass-by-ref
+        * datums that may be present in copyTuple).  As with the next step, this
+        * is to guard against early re-use of the EPQ query.
+        */
+       if (!TupIsNull(slot))
+               (void) ExecMaterializeSlot(slot);
 
-       /* Send to dest */
-       (*dest->receiveSlot) (retSlot, dest);
+       /*
+        * Clear out the test tuple.  This is needed in case the EPQ query is
+        * re-used to test a tuple for a different relation.  (Not clear that can
+        * really happen, but let's be safe.)
+        */
+       EvalPlanQualSetTuple(epqstate, rti, NULL);
 
-       ExecClearTuple(retSlot);
+       return slot;
 }
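For orientation, a hedged sketch of the intended caller pattern, condensed from the UPDATE path that now lives in the ModifyTable node: when heap_update reports a concurrent update, the latest row version is rechecked through EvalPlanQual and, if it still qualifies, the operation is retried against the new TID. The surrounding variables (tupleid, update_ctid, update_xmax, lreplace) are illustrative:

    case HeapTupleUpdated:
        if (IsolationUsesXactSnapshot())
            ereport(ERROR,
                    (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
                     errmsg("could not serialize access due to concurrent update")));
        if (!ItemPointerEquals(tupleid, &update_ctid))
        {
            TupleTableSlot *epqslot;

            epqslot = EvalPlanQual(estate, epqstate,
                                   resultRelationDesc,
                                   resultRelInfo->ri_RangeTableIndex,
                                   &update_ctid, update_xmax);
            if (!TupIsNull(epqslot))
            {
                *tupleid = update_ctid;
                slot = ExecFilterJunk(resultRelInfo->ri_junkFilter, epqslot);
                goto lreplace;      /* retry the update against the new version */
            }
        }
        /* tuple was deleted meanwhile; nothing more to do */
        return NULL;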
 
 /*
- * Check a modified tuple to see if we want to process its updated version
- * under READ COMMITTED rules.
- *
- * See backend/executor/README for some info about how this works.
+ * Fetch a copy of the newest version of an outdated tuple
  *
  *     estate - executor state data
- *     rti - rangetable index of table containing tuple
+ *     relation - table containing tuple
+ *     lockmode - requested tuple lock mode
  *     *tid - t_ctid from the outdated tuple (ie, next updated version)
  *     priorXmax - t_xmax from the outdated tuple
  *
- * *tid is also an output parameter: it's modified to hold the TID of the
- * latest version of the tuple (note this may be changed even on failure)
+ * Returns a palloc'd copy of the newest tuple version, or NULL if we find
+ * that there is no newest version (ie, the row was deleted not updated).
+ * If successful, we have locked the newest tuple version, so caller does not
+ * need to worry about it changing anymore.
  *
- * Returns a slot containing the new candidate update/delete tuple, or
- * NULL if we determine we shouldn't process the row.
+ * Note: properly, lockmode should be declared as enum LockTupleMode,
+ * but we use "int" to avoid having to include heapam.h in executor.h.
  */
-TupleTableSlot *
-EvalPlanQual(EState *estate, Index rti,
-                        ItemPointer tid, TransactionId priorXmax)
+HeapTuple
+EvalPlanQualFetch(EState *estate, Relation relation, int lockmode,
+                                 ItemPointer tid, TransactionId priorXmax)
 {
-       evalPlanQual *epq;
-       EState     *epqstate;
-       Relation        relation;
-       HeapTupleData tuple;
        HeapTuple       copyTuple = NULL;
+       HeapTupleData tuple;
        SnapshotData SnapshotDirty;
-       bool            endNode;
-
-       Assert(rti != 0);
-
-       /*
-        * find relation containing target tuple
-        */
-       if (estate->es_result_relation_info != NULL &&
-               estate->es_result_relation_info->ri_RangeTableIndex == rti)
-               relation = estate->es_result_relation_info->ri_RelationDesc;
-       else
-       {
-               ListCell   *l;
-
-               relation = NULL;
-               foreach(l, estate->es_rowMarks)
-               {
-                       if (((ExecRowMark *) lfirst(l))->rti == rti)
-                       {
-                               relation = ((ExecRowMark *) lfirst(l))->relation;
-                               break;
-                       }
-               }
-               if (relation == NULL)
-                       elog(ERROR, "could not find RowMark for RT index %u", rti);
-       }
 
        /*
-        * fetch tid tuple
+        * fetch target tuple
         *
         * Loop here to deal with updated or busy tuples
         */
@@ -2064,6 +1782,10 @@ EvalPlanQual(EState *estate, Index rti,
 
                if (heap_fetch(relation, &SnapshotDirty, &tuple, &buffer, true, NULL))
                {
+                       HTSU_Result test;
+                       ItemPointerData update_ctid;
+                       TransactionId update_xmax;
+
                        /*
                         * If xmin isn't what we're expecting, the slot must have been
                         * recycled and reused for an unrelated tuple.  This implies that
@@ -2113,6 +1835,51 @@ EvalPlanQual(EState *estate, Index rti,
                        }
 
                        /*
+                        * This is a live tuple, so now try to lock it.
+                        */
+                       test = heap_lock_tuple(relation, &tuple, &buffer,
+                                                                  &update_ctid, &update_xmax,
+                                                                  estate->es_output_cid,
+                                                                  lockmode, false);
+                       /* We now have two pins on the buffer, get rid of one */
+                       ReleaseBuffer(buffer);
+
+                       switch (test)
+                       {
+                               case HeapTupleSelfUpdated:
+                                       /* treat it as deleted; do not process */
+                                       ReleaseBuffer(buffer);
+                                       return NULL;
+
+                               case HeapTupleMayBeUpdated:
+                                       /* successfully locked */
+                                       break;
+
+                               case HeapTupleUpdated:
+                                       ReleaseBuffer(buffer);
+                                       if (IsolationUsesXactSnapshot())
+                                               ereport(ERROR,
+                                                               (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
+                                                                errmsg("could not serialize access due to concurrent update")));
+                                       if (!ItemPointerEquals(&update_ctid, &tuple.t_self))
+                                       {
+                                               /* it was updated, so look at the updated version */
+                                               tuple.t_self = update_ctid;
+                                               /* updated row should have xmin matching this xmax */
+                                               priorXmax = update_xmax;
+                                               continue;
+                                       }
+                                       /* tuple was deleted, so give up */
+                                       return NULL;
+
+                               default:
+                                       ReleaseBuffer(buffer);
+                                       elog(ERROR, "unrecognized heap_lock_tuple status: %u",
+                                                test);
+                                       return NULL;    /* keep compiler quiet */
+                       }
+
+                       /*
                         * We got tuple - now copy it for use by recheck query.
                         */
                        copyTuple = heap_copytuple(&tuple);
@@ -2147,7 +1914,7 @@ EvalPlanQual(EState *estate, Index rti,
                 * mean that the row was updated or deleted by either a committed xact
                 * or our own xact.  If it was deleted, we can ignore it; if it was
                 * updated then chain up to the next version and repeat the whole
-                * test.
+                * process.
                 *
                 * As above, it should be safe to examine xmax and t_ctid without the
                 * buffer content lock, because they can't be changing.
@@ -2168,332 +1935,412 @@ EvalPlanQual(EState *estate, Index rti,
        }
 
        /*
-        * For UPDATE/DELETE we have to return tid of actual row we're executing
-        * PQ for.
+        * Return the copied tuple
         */
-       *tid = tuple.t_self;
+       return copyTuple;
+}
 
-       /*
-        * Need to run a recheck subquery.      Find or create a PQ stack entry.
-        */
-       epq = estate->es_evalPlanQual;
-       endNode = true;
+/*
+ * EvalPlanQualInit -- initialize during creation of a plan state node
+ * that might need to invoke EPQ processing.
+ *
+ * Note: subplan/auxrowmarks can be NULL/NIL if they will be set later
+ * with EvalPlanQualSetPlan.
+ */
+void
+EvalPlanQualInit(EPQState *epqstate, EState *estate,
+                                Plan *subplan, List *auxrowmarks, int epqParam)
+{
+       /* Mark the EPQ state inactive */
+       epqstate->estate = NULL;
+       epqstate->planstate = NULL;
+       epqstate->origslot = NULL;
+       /* ... and remember data that EvalPlanQualBegin will need */
+       epqstate->plan = subplan;
+       epqstate->arowMarks = auxrowmarks;
+       epqstate->epqParam = epqParam;
+}
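A brief sketch of the intended usage from a node with several subplans, patterned on the ModifyTable node (field names such as mt_epqstate and mt_arowmarks are assumptions for illustration): the EPQ state is created once without a plan, then repointed at whichever subplan is currently active.

    /* once, during node initialization */
    EvalPlanQualInit(&mtstate->mt_epqstate, estate, NULL, NIL, node->epqParam);

    /* whenever subplan i becomes the active one */
    EvalPlanQualSetPlan(&mtstate->mt_epqstate,
                        (Plan *) list_nth(node->plans, i),
                        mtstate->mt_arowmarks[i]);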
 
-       if (epq != NULL && epq->rti == 0)
-       {
-               /* Top PQ stack entry is idle, so re-use it */
-               Assert(!(estate->es_useEvalPlan) && epq->next == NULL);
-               epq->rti = rti;
-               endNode = false;
-       }
+/*
+ * EvalPlanQualSetPlan -- set or change subplan of an EPQState.
+ *
+ * We need this so that ModifyTable can deal with multiple subplans.
+ */
+void
+EvalPlanQualSetPlan(EPQState *epqstate, Plan *subplan, List *auxrowmarks)
+{
+       /* If we have a live EPQ query, shut it down */
+       EvalPlanQualEnd(epqstate);
+       /* And set/change the plan pointer */
+       epqstate->plan = subplan;
+       /* The rowmarks depend on the plan, too */
+       epqstate->arowMarks = auxrowmarks;
+}
 
-       /*
-        * If this is request for another RTE - Ra, - then we have to check wasn't
-        * PlanQual requested for Ra already and if so then Ra' row was updated
-        * again and we have to re-start old execution for Ra and forget all what
-        * we done after Ra was suspended. Cool? -:))
-        */
-       if (epq != NULL && epq->rti != rti &&
-               epq->estate->es_evTuple[rti - 1] != NULL)
-       {
-               do
-               {
-                       evalPlanQual *oldepq;
-
-                       /* stop execution */
-                       EvalPlanQualStop(epq);
-                       /* pop previous PlanQual from the stack */
-                       oldepq = epq->next;
-                       Assert(oldepq && oldepq->rti != 0);
-                       /* push current PQ to freePQ stack */
-                       oldepq->free = epq;
-                       epq = oldepq;
-                       estate->es_evalPlanQual = epq;
-               } while (epq->rti != rti);
-       }
+/*
+ * Install one test tuple into EPQ state, or clear test tuple if tuple == NULL
+ *
+ * NB: passed tuple must be palloc'd; it may get freed later
+ */
+void
+EvalPlanQualSetTuple(EPQState *epqstate, Index rti, HeapTuple tuple)
+{
+       EState     *estate = epqstate->estate;
+
+       Assert(rti > 0);
 
        /*
-        * If we are requested for another RTE then we have to suspend execution
-        * of current PlanQual and start execution for new one.
+        * free old test tuple, if any, and store new tuple where relation's scan
+        * node will see it
         */
-       if (epq == NULL || epq->rti != rti)
-       {
-               /* try to reuse plan used previously */
-               evalPlanQual *newepq = (epq != NULL) ? epq->free : NULL;
+       if (estate->es_epqTuple[rti - 1] != NULL)
+               heap_freetuple(estate->es_epqTuple[rti - 1]);
+       estate->es_epqTuple[rti - 1] = tuple;
+       estate->es_epqTupleSet[rti - 1] = true;
+}
 
-               if (newepq == NULL)             /* first call or freePQ stack is empty */
-               {
-                       newepq = (evalPlanQual *) palloc0(sizeof(evalPlanQual));
-                       newepq->free = NULL;
-                       newepq->estate = NULL;
-                       newepq->planstate = NULL;
-               }
-               else
-               {
-                       /* recycle previously used PlanQual */
-                       Assert(newepq->estate == NULL);
-                       epq->free = NULL;
-               }
-               /* push current PQ to the stack */
-               newepq->next = epq;
-               epq = newepq;
-               estate->es_evalPlanQual = epq;
-               epq->rti = rti;
-               endNode = false;
-       }
+/*
+ * Fetch back the current test tuple (if any) for the specified RTI
+ */
+HeapTuple
+EvalPlanQualGetTuple(EPQState *epqstate, Index rti)
+{
+       EState     *estate = epqstate->estate;
 
-       Assert(epq->rti == rti);
+       Assert(rti > 0);
 
-       /*
-        * Ok - we're requested for the same RTE.  Unfortunately we still have to
-        * end and restart execution of the plan, because ExecReScan wouldn't
-        * ensure that upper plan nodes would reset themselves.  We could make
-        * that work if insertion of the target tuple were integrated with the
-        * Param mechanism somehow, so that the upper plan nodes know that their
-        * children's outputs have changed.
-        *
-        * Note that the stack of free evalPlanQual nodes is quite useless at the
-        * moment, since it only saves us from pallocing/releasing the
-        * evalPlanQual nodes themselves.  But it will be useful once we implement
-        * ReScan instead of end/restart for re-using PlanQual nodes.
-        */
-       if (endNode)
+       return estate->es_epqTuple[rti - 1];
+}
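
A hedged sketch of the set/get contract (all variables are assumed to exist in the caller): the tuple handed to EvalPlanQualSetTuple must be a palloc'd copy, since the EPQ machinery may free it when the slot is overwritten or cleared, and EvalPlanQualBegin must already have created the child EState these arrays live in.

    HeapTuple   testtuple = heap_copytuple(&tuple); /* palloc'd copy, as required */

    EvalPlanQualSetTuple(epqstate, rti, testtuple); /* rti's scan node will return it */
    Assert(EvalPlanQualGetTuple(epqstate, rti) == testtuple);

    EvalPlanQualSetTuple(epqstate, rti, NULL);      /* clears, freeing the old copy */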
+
+/*
+ * Fetch the current row values for any non-locked relations that need
+ * to be scanned by an EvalPlanQual operation. origslot must have been set
+ * to contain the current result row (top-level row) that we need to recheck.
+ */
+void
+EvalPlanQualFetchRowMarks(EPQState *epqstate)
+{
+       ListCell   *l;
+
+       Assert(epqstate->origslot != NULL);
+
+       foreach(l, epqstate->arowMarks)
        {
-               /* stop execution */
-               EvalPlanQualStop(epq);
-       }
+               ExecAuxRowMark *aerm = (ExecAuxRowMark *) lfirst(l);
+               ExecRowMark *erm = aerm->rowmark;
+               Datum           datum;
+               bool            isNull;
+               HeapTupleData tuple;
 
-       /*
-        * Initialize new recheck query.
-        *
-        * Note: if we were re-using PlanQual plans via ExecReScan, we'd need to
-        * instead copy down changeable state from the top plan (including
-        * es_result_relation_info, es_junkFilter) and reset locally changeable
-        * state in the epq (including es_param_exec_vals, es_evTupleNull).
-        */
-       EvalPlanQualStart(epq, estate, epq->next);
+               if (RowMarkRequiresRowShareLock(erm->markType))
+                       elog(ERROR, "EvalPlanQual doesn't support locking rowmarks");
 
-       /*
-        * free old RTE' tuple, if any, and store target tuple where relation's
-        * scan node will see it
-        */
-       epqstate = epq->estate;
-       if (epqstate->es_evTuple[rti - 1] != NULL)
-               heap_freetuple(epqstate->es_evTuple[rti - 1]);
-       epqstate->es_evTuple[rti - 1] = copyTuple;
+               /* clear any leftover test tuple for this rel */
+               EvalPlanQualSetTuple(epqstate, erm->rti, NULL);
 
-       return EvalPlanQualNext(estate);
-}
+               if (erm->relation)
+               {
+                       Buffer          buffer;
 
-static TupleTableSlot *
-EvalPlanQualNext(EState *estate)
-{
-       evalPlanQual *epq = estate->es_evalPlanQual;
-       MemoryContext oldcontext;
-       TupleTableSlot *slot;
+                       Assert(erm->markType == ROW_MARK_REFERENCE);
 
-       Assert(epq->rti != 0);
+                       /* if child rel, must check whether it produced this row */
+                       if (erm->rti != erm->prti)
+                       {
+                               Oid                     tableoid;
 
-lpqnext:;
-       oldcontext = MemoryContextSwitchTo(epq->estate->es_query_cxt);
-       slot = ExecProcNode(epq->planstate);
-       MemoryContextSwitchTo(oldcontext);
+                               datum = ExecGetJunkAttribute(epqstate->origslot,
+                                                                                        aerm->toidAttNo,
+                                                                                        &isNull);
+                               /* non-locked rels could be on the inside of outer joins */
+                               if (isNull)
+                                       continue;
+                               tableoid = DatumGetObjectId(datum);
 
-       /*
-        * No more tuples for this PQ. Continue previous one.
-        */
-       if (TupIsNull(slot))
-       {
-               evalPlanQual *oldepq;
+                               if (tableoid != RelationGetRelid(erm->relation))
+                               {
+                                       /* this child is inactive right now */
+                                       continue;
+                               }
+                       }
 
-               /* stop execution */
-               EvalPlanQualStop(epq);
-               /* pop old PQ from the stack */
-               oldepq = epq->next;
-               if (oldepq == NULL)
+                       /* fetch the tuple's ctid */
+                       datum = ExecGetJunkAttribute(epqstate->origslot,
+                                                                                aerm->ctidAttNo,
+                                                                                &isNull);
+                       /* non-locked rels could be on the inside of outer joins */
+                       if (isNull)
+                               continue;
+                       tuple.t_self = *((ItemPointer) DatumGetPointer(datum));
+
+                       /* okay, fetch the tuple */
+                       if (!heap_fetch(erm->relation, SnapshotAny, &tuple, &buffer,
+                                                       false, NULL))
+                               elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");
+
+                       /* successful, copy and store tuple */
+                       EvalPlanQualSetTuple(epqstate, erm->rti,
+                                                                heap_copytuple(&tuple));
+                       ReleaseBuffer(buffer);
+               }
+               else
                {
-                       /* this is the first (oldest) PQ - mark as free */
-                       epq->rti = 0;
-                       estate->es_useEvalPlan = false;
-                       /* and continue Query execution */
-                       return NULL;
+                       HeapTupleHeader td;
+
+                       Assert(erm->markType == ROW_MARK_COPY);
+
+                       /* fetch the whole-row Var for the relation */
+                       datum = ExecGetJunkAttribute(epqstate->origslot,
+                                                                                aerm->wholeAttNo,
+                                                                                &isNull);
+                       /* non-locked rels could be on the inside of outer joins */
+                       if (isNull)
+                               continue;
+                       td = DatumGetHeapTupleHeader(datum);
+
+                       /* build a temporary HeapTuple control structure */
+                       tuple.t_len = HeapTupleHeaderGetDatumLength(td);
+                       ItemPointerSetInvalid(&(tuple.t_self));
+                       tuple.t_tableOid = InvalidOid;
+                       tuple.t_data = td;
+
+                       /* copy and store tuple */
+                       EvalPlanQualSetTuple(epqstate, erm->rti,
+                                                                heap_copytuple(&tuple));
                }
-               Assert(oldepq->rti != 0);
-               /* push current PQ to freePQ stack */
-               oldepq->free = epq;
-               epq = oldepq;
-               estate->es_evalPlanQual = epq;
-               goto lpqnext;
        }
+}
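
Taken together with the routines above, the usual recheck sequence a caller runs after finding a concurrently updated row looks roughly like this sketch; the local variables (copyTuple, origslot, slot) are assumptions and error handling is omitted.

    EvalPlanQualBegin(epqstate, estate);            /* build or reset the child plan tree */
    EvalPlanQualSetTuple(epqstate, rti, copyTuple); /* updated version of the target row */
    epqstate->origslot = origslot;                  /* the result row being rechecked */
    EvalPlanQualFetchRowMarks(epqstate);            /* refetch the other, non-locked rels */

    slot = EvalPlanQualNext(epqstate);
    if (!TupIsNull(slot))
    {
        /* the modified row still passes the quals; proceed using this slot */
    }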
+
+/*
+ * Fetch the next row (if any) from EvalPlanQual testing
+ *
+ * (In practice, there should never be more than one row...)
+ */
+TupleTableSlot *
+EvalPlanQualNext(EPQState *epqstate)
+{
+       MemoryContext oldcontext;
+       TupleTableSlot *slot;
+
+       oldcontext = MemoryContextSwitchTo(epqstate->estate->es_query_cxt);
+       slot = ExecProcNode(epqstate->planstate);
+       MemoryContextSwitchTo(oldcontext);
 
        return slot;
 }
 
-static void
-EndEvalPlanQual(EState *estate)
+/*
+ * Initialize or reset an EvalPlanQual state tree
+ */
+void
+EvalPlanQualBegin(EPQState *epqstate, EState *parentestate)
 {
-       evalPlanQual *epq = estate->es_evalPlanQual;
+       EState     *estate = epqstate->estate;
 
-       if (epq->rti == 0)                      /* plans already shutdowned */
+       if (estate == NULL)
        {
-               Assert(epq->next == NULL);
-               return;
+               /* First time through, so create a child EState */
+               EvalPlanQualStart(epqstate, parentestate, epqstate->plan);
        }
-
-       for (;;)
+       else
        {
-               evalPlanQual *oldepq;
+               /*
+                * We already have a suitable child EPQ tree, so just reset it.
+                */
+               int                     rtsize = list_length(parentestate->es_range_table);
+               PlanState  *planstate = epqstate->planstate;
 
-               /* stop execution */
-               EvalPlanQualStop(epq);
-               /* pop old PQ from the stack */
-               oldepq = epq->next;
-               if (oldepq == NULL)
+               MemSet(estate->es_epqScanDone, 0, rtsize * sizeof(bool));
+
+               /* Recopy current values of parent parameters */
+               if (parentestate->es_plannedstmt->nParamExec > 0)
                {
-                       /* this is the first (oldest) PQ - mark as free */
-                       epq->rti = 0;
-                       estate->es_useEvalPlan = false;
-                       break;
+                       int                     i = parentestate->es_plannedstmt->nParamExec;
+
+                       while (--i >= 0)
+                       {
+                               /* copy value if any, but not execPlan link */
+                               estate->es_param_exec_vals[i].value =
+                                       parentestate->es_param_exec_vals[i].value;
+                               estate->es_param_exec_vals[i].isnull =
+                                       parentestate->es_param_exec_vals[i].isnull;
+                       }
                }
-               Assert(oldepq->rti != 0);
-               /* push current PQ to freePQ stack */
-               oldepq->free = epq;
-               epq = oldepq;
-               estate->es_evalPlanQual = epq;
+
+               /*
+                * Mark child plan tree as needing rescan at all scan nodes.  The
+                * first ExecProcNode will take care of actually doing the rescan.
+                */
+               planstate->chgParam = bms_add_member(planstate->chgParam,
+                                                                                        epqstate->epqParam);
        }
 }
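
The reset path relies on the executor's ordinary parameter-change machinery rather than rebuilding anything: marking epqParam in the top node's chgParam makes the next ExecProcNode call rescan the tree. Roughly (a simplified, assumed rendering of the dispatch logic, not code from this patch):

    if (planstate->chgParam != NULL)    /* a parameter this subtree depends on changed */
        ExecReScan(planstate);          /* reset the nodes before producing any tuple */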
 
 /*
- * Start execution of one level of PlanQual.
+ * Start execution of an EvalPlanQual plan tree.
  *
  * This is a cut-down version of ExecutorStart(): we copy some state from
  * the top-level estate rather than initializing it fresh.
  */
 static void
-EvalPlanQualStart(evalPlanQual *epq, EState *estate, evalPlanQual *priorepq)
+EvalPlanQualStart(EPQState *epqstate, EState *parentestate, Plan *planTree)
 {
-       EState     *epqstate;
+       EState     *estate;
        int                     rtsize;
        MemoryContext oldcontext;
        ListCell   *l;
 
-       rtsize = list_length(estate->es_range_table);
+       rtsize = list_length(parentestate->es_range_table);
 
-       epq->estate = epqstate = CreateExecutorState();
+       epqstate->estate = estate = CreateExecutorState();
 
-       oldcontext = MemoryContextSwitchTo(epqstate->es_query_cxt);
+       oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
 
        /*
-        * The epqstates share the top query's copy of unchanging state such as
+        * Child EPQ EStates share the parent's copy of unchanging state such as
         * the snapshot, rangetable, result-rel info, and external Param info.
         * They need their own copies of local state, including a tuple table,
         * es_param_exec_vals, etc.
         */
-       epqstate->es_direction = ForwardScanDirection;
-       epqstate->es_snapshot = estate->es_snapshot;
-       epqstate->es_crosscheck_snapshot = estate->es_crosscheck_snapshot;
-       epqstate->es_range_table = estate->es_range_table;
-       epqstate->es_output_cid = estate->es_output_cid;
-       epqstate->es_result_relations = estate->es_result_relations;
-       epqstate->es_num_result_relations = estate->es_num_result_relations;
-       epqstate->es_result_relation_info = estate->es_result_relation_info;
-       epqstate->es_junkFilter = estate->es_junkFilter;
+       estate->es_direction = ForwardScanDirection;
+       estate->es_snapshot = parentestate->es_snapshot;
+       estate->es_crosscheck_snapshot = parentestate->es_crosscheck_snapshot;
+       estate->es_range_table = parentestate->es_range_table;
+       estate->es_plannedstmt = parentestate->es_plannedstmt;
+       estate->es_junkFilter = parentestate->es_junkFilter;
+       estate->es_output_cid = parentestate->es_output_cid;
+       estate->es_result_relations = parentestate->es_result_relations;
+       estate->es_num_result_relations = parentestate->es_num_result_relations;
+       estate->es_result_relation_info = parentestate->es_result_relation_info;
        /* es_trig_target_relations must NOT be copied */
-       epqstate->es_into_relation_descriptor = estate->es_into_relation_descriptor;
-       epqstate->es_into_relation_use_wal = estate->es_into_relation_use_wal;
-       epqstate->es_param_list_info = estate->es_param_list_info;
-       if (estate->es_plannedstmt->nParamExec > 0)
-               epqstate->es_param_exec_vals = (ParamExecData *)
-                       palloc0(estate->es_plannedstmt->nParamExec * sizeof(ParamExecData));
-       epqstate->es_rowMarks = estate->es_rowMarks;
-       epqstate->es_instrument = estate->es_instrument;
-       epqstate->es_select_into = estate->es_select_into;
-       epqstate->es_into_oids = estate->es_into_oids;
-       epqstate->es_plannedstmt = estate->es_plannedstmt;
-
-       /*
-        * Each epqstate must have its own es_evTupleNull state, but all the stack
-        * entries share es_evTuple state.      This allows sub-rechecks to inherit
-        * the value being examined by an outer recheck.
-        */
-       epqstate->es_evTupleNull = (bool *) palloc0(rtsize * sizeof(bool));
-       if (priorepq == NULL)
-               /* first PQ stack entry */
-               epqstate->es_evTuple = (HeapTuple *)
-                       palloc0(rtsize * sizeof(HeapTuple));
+       estate->es_rowMarks = parentestate->es_rowMarks;
+       estate->es_top_eflags = parentestate->es_top_eflags;
+       estate->es_instrument = parentestate->es_instrument;
+       estate->es_select_into = parentestate->es_select_into;
+       estate->es_into_oids = parentestate->es_into_oids;
+       /* es_auxmodifytables must NOT be copied */
+
+       /*
+        * The external param list is simply shared from parent.  The internal
+        * param workspace has to be local state, but we copy the initial values
+        * from the parent, so as to have access to any param values that were
+        * already set from other parts of the parent's plan tree.
+        */
+       estate->es_param_list_info = parentestate->es_param_list_info;
+       if (parentestate->es_plannedstmt->nParamExec > 0)
+       {
+               int                     i = parentestate->es_plannedstmt->nParamExec;
+
+               estate->es_param_exec_vals = (ParamExecData *)
+                       palloc0(i * sizeof(ParamExecData));
+               while (--i >= 0)
+               {
+                       /* copy value if any, but not execPlan link */
+                       estate->es_param_exec_vals[i].value =
+                               parentestate->es_param_exec_vals[i].value;
+                       estate->es_param_exec_vals[i].isnull =
+                               parentestate->es_param_exec_vals[i].isnull;
+               }
+       }
+
+       /*
+        * Each EState must have its own es_epqScanDone state, but if we have
+        * nested EPQ checks they should share es_epqTuple arrays.      This allows
+        * sub-rechecks to inherit the values being examined by an outer recheck.
+        */
+       estate->es_epqScanDone = (bool *) palloc0(rtsize * sizeof(bool));
+       if (parentestate->es_epqTuple != NULL)
+       {
+               estate->es_epqTuple = parentestate->es_epqTuple;
+               estate->es_epqTupleSet = parentestate->es_epqTupleSet;
+       }
        else
-               /* later stack entries share the same storage */
-               epqstate->es_evTuple = priorepq->estate->es_evTuple;
+       {
+               estate->es_epqTuple = (HeapTuple *)
+                       palloc0(rtsize * sizeof(HeapTuple));
+               estate->es_epqTupleSet = (bool *)
+                       palloc0(rtsize * sizeof(bool));
+       }
 
        /*
-        * Create sub-tuple-table; we needn't redo the CountSlots work though.
+        * Each estate also has its own tuple table.
         */
-       epqstate->es_tupleTable =
-               ExecCreateTupleTable(estate->es_tupleTable->size);
+       estate->es_tupleTable = NIL;
 
        /*
         * Initialize private state information for each SubPlan.  We must do this
         * before running ExecInitNode on the main query tree, since
-        * ExecInitSubPlan expects to be able to find these entries.
+        * ExecInitSubPlan expects to be able to find these entries. Some of the
+        * SubPlans might not be used in the part of the plan tree we intend to
+        * run, but since it's not easy to tell which, we just initialize them
+        * all.  (However, if the subplan is headed by a ModifyTable node, then it
+        * must be a data-modifying CTE, which we will certainly not need to
+        * re-run, so we can skip initializing it.      This is just an efficiency
+        * hack; it won't skip data-modifying CTEs for which the ModifyTable node
+        * is not at the top.)
         */
-       Assert(epqstate->es_subplanstates == NIL);
-       foreach(l, estate->es_plannedstmt->subplans)
+       Assert(estate->es_subplanstates == NIL);
+       foreach(l, parentestate->es_plannedstmt->subplans)
        {
                Plan       *subplan = (Plan *) lfirst(l);
                PlanState  *subplanstate;
 
-               subplanstate = ExecInitNode(subplan, epqstate, 0);
+               /* Don't initialize ModifyTable subplans, per comment above */
+               if (IsA(subplan, ModifyTable))
+                       subplanstate = NULL;
+               else
+                       subplanstate = ExecInitNode(subplan, estate, 0);
 
-               epqstate->es_subplanstates = lappend(epqstate->es_subplanstates,
-                                                                                        subplanstate);
+               estate->es_subplanstates = lappend(estate->es_subplanstates,
+                                                                                  subplanstate);
        }
 
        /*
-        * Initialize the private state information for all the nodes in the query
-        * tree.  This opens files, allocates storage and leaves us ready to start
-        * processing tuples.
+        * Initialize the private state information for all the nodes in the part
+        * of the plan tree we need to run.  This opens files, allocates storage
+        * and leaves us ready to start processing tuples.
         */
-       epq->planstate = ExecInitNode(estate->es_plannedstmt->planTree, epqstate, 0);
+       epqstate->planstate = ExecInitNode(planTree, estate, 0);
 
        MemoryContextSwitchTo(oldcontext);
 }
 
 /*
- * End execution of one level of PlanQual.
+ * EvalPlanQualEnd -- shut down at termination of parent plan state node,
+ * or if we are done with the current EPQ child.
  *
  * This is a cut-down version of ExecutorEnd(); basically we want to do most
  * of the normal cleanup, but *not* close result relations (which we are
  * just sharing from the outer query). We do, however, have to close any
  * trigger target relations that got opened, since those are not shared.
+ * (There probably shouldn't be any of the latter, but just in case...)
  */
-static void
-EvalPlanQualStop(evalPlanQual *epq)
+void
+EvalPlanQualEnd(EPQState *epqstate)
 {
-       EState     *epqstate = epq->estate;
+       EState     *estate = epqstate->estate;
        MemoryContext oldcontext;
        ListCell   *l;
 
-       oldcontext = MemoryContextSwitchTo(epqstate->es_query_cxt);
+       if (estate == NULL)
+               return;                                 /* idle, so nothing to do */
+
+       oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
 
-       ExecEndNode(epq->planstate);
+       ExecEndNode(epqstate->planstate);
 
-       foreach(l, epqstate->es_subplanstates)
+       foreach(l, estate->es_subplanstates)
        {
                PlanState  *subplanstate = (PlanState *) lfirst(l);
 
                ExecEndNode(subplanstate);
        }
 
-       ExecDropTupleTable(epqstate->es_tupleTable, true);
-       epqstate->es_tupleTable = NULL;
-
-       if (epqstate->es_evTuple[epq->rti - 1] != NULL)
-       {
-               heap_freetuple(epqstate->es_evTuple[epq->rti - 1]);
-               epqstate->es_evTuple[epq->rti - 1] = NULL;
-       }
+       /* throw away the per-estate tuple table */
+       ExecResetTupleTable(estate->es_tupleTable, false);
 
-       foreach(l, epqstate->es_trig_target_relations)
+       /* close any trigger target relations attached to this EState */
+       foreach(l, estate->es_trig_target_relations)
        {
                ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l);
 
@@ -2504,28 +2351,12 @@ EvalPlanQualStop(evalPlanQual *epq)
 
        MemoryContextSwitchTo(oldcontext);
 
-       FreeExecutorState(epqstate);
-
-       epq->estate = NULL;
-       epq->planstate = NULL;
-}
-
-/*
- * ExecGetActivePlanTree --- get the active PlanState tree from a QueryDesc
- *
- * Ordinarily this is just the one mentioned in the QueryDesc, but if we
- * are looking at a row returned by the EvalPlanQual machinery, we need
- * to look at the subsidiary state instead.
- */
-PlanState *
-ExecGetActivePlanTree(QueryDesc *queryDesc)
-{
-       EState     *estate = queryDesc->estate;
+       FreeExecutorState(estate);
 
-       if (estate && estate->es_useEvalPlan && estate->es_evalPlanQual != NULL)
-               return estate->es_evalPlanQual->planstate;
-       else
-               return queryDesc->planstate;
+       /* Mark EPQState idle */
+       epqstate->estate = NULL;
+       epqstate->planstate = NULL;
+       epqstate->origslot = NULL;
 }
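
For completeness, a hypothetical shutdown hook for a node that owns an EPQState; the node type and field names are assumptions. Calling EvalPlanQualEnd unconditionally is fine, since an EPQState that was never begun is simply idle.

    static void
    ExecEndMyNode(MyNodeState *mystate)
    {
        /* releases the child EState, if any, and marks the EPQState idle */
        EvalPlanQualEnd(&mystate->my_epqstate);
    }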
 
 
@@ -2534,15 +2365,15 @@ ExecGetActivePlanTree(QueryDesc *queryDesc)
  *
  * We implement SELECT INTO by diverting SELECT's normal output with
  * a specialized DestReceiver type.
- *
- * TODO: remove some of the INTO-specific cruft from EState, and keep
- * it in the DestReceiver instead.
  */
 
 typedef struct
 {
        DestReceiver pub;                       /* publicly-known function pointers */
        EState     *estate;                     /* EState we are working with */
+       Relation        rel;                    /* Relation to write to */
+       int                     hi_options;             /* heap_insert performance options */
+       BulkInsertState bistate;        /* bulk insert state */
 } DR_intorel;
 
 /*
@@ -2562,17 +2393,22 @@ OpenIntoRel(QueryDesc *queryDesc)
        Oid                     namespaceId;
        Oid                     tablespaceId;
        Datum           reloptions;
-       AclResult       aclresult;
        Oid                     intoRelationId;
-       TupleDesc       tupdesc;
        DR_intorel *myState;
+       static char *validnsps[] = HEAP_RELOPT_NAMESPACES;
 
        Assert(into);
 
        /*
+        * XXX This code needs to be kept in sync with DefineRelation(). Maybe we
+        * should try to use that function instead.
+        */
+
+       /*
         * Check consistency of arguments
         */
-       if (into->onCommit != ONCOMMIT_NOOP && !into->rel->istemp)
+       if (into->onCommit != ONCOMMIT_NOOP
+               && into->rel->relpersistence != RELPERSISTENCE_TEMP)
                ereport(ERROR,
                                (errcode(ERRCODE_INVALID_TABLE_DEFINITION),
                                 errmsg("ON COMMIT can only be used on temporary tables")));
@@ -2581,13 +2417,19 @@ OpenIntoRel(QueryDesc *queryDesc)
         * Find namespace to create in, check its permissions
         */
        intoName = into->rel->relname;
-       namespaceId = RangeVarGetCreationNamespace(into->rel);
+       namespaceId = RangeVarGetAndCheckCreationNamespace(into->rel);
+       RangeVarAdjustRelationPersistence(into->rel, namespaceId);
 
-       aclresult = pg_namespace_aclcheck(namespaceId, GetUserId(),
-                                                                         ACL_CREATE);
-       if (aclresult != ACLCHECK_OK)
-               aclcheck_error(aclresult, ACL_KIND_NAMESPACE,
-                                          get_namespace_name(namespaceId));
+       /*
+        * Security check: disallow creating temp tables from security-restricted
+        * code.  This is needed because calling code might not expect untrusted
+        * tables to appear in pg_temp at the front of its search path.
+        */
+       if (into->rel->relpersistence == RELPERSISTENCE_TEMP
+               && InSecurityRestrictedOperation())
+               ereport(ERROR,
+                               (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+                                errmsg("cannot create temporary table within security-restricted operation")));
 
        /*
         * Select tablespace to use.  If not specified, use default tablespace
@@ -2595,16 +2437,11 @@ OpenIntoRel(QueryDesc *queryDesc)
         */
        if (into->tableSpaceName)
        {
-               tablespaceId = get_tablespace_oid(into->tableSpaceName);
-               if (!OidIsValid(tablespaceId))
-                       ereport(ERROR,
-                                       (errcode(ERRCODE_UNDEFINED_OBJECT),
-                                        errmsg("tablespace \"%s\" does not exist",
-                                                       into->tableSpaceName)));
+               tablespaceId = get_tablespace_oid(into->tableSpaceName, false);
        }
        else
        {
-               tablespaceId = GetDefaultTablespace(into->rel->istemp);
+               tablespaceId = GetDefaultTablespace(into->rel->relpersistence);
                /* note InvalidOid is OK in this case */
        }
 
@@ -2624,30 +2461,33 @@ OpenIntoRel(QueryDesc *queryDesc)
        /* Parse and validate any reloptions */
        reloptions = transformRelOptions((Datum) 0,
                                                                         into->options,
+                                                                        NULL,
+                                                                        validnsps,
                                                                         true,
                                                                         false);
        (void) heap_reloptions(RELKIND_RELATION, reloptions, true);
 
-       /* Copy the tupdesc because heap_create_with_catalog modifies it */
-       tupdesc = CreateTupleDescCopy(queryDesc->tupDesc);
-
        /* Now we can actually create the new relation */
        intoRelationId = heap_create_with_catalog(intoName,
                                                                                          namespaceId,
                                                                                          tablespaceId,
                                                                                          InvalidOid,
+                                                                                         InvalidOid,
+                                                                                         InvalidOid,
                                                                                          GetUserId(),
-                                                                                         tupdesc,
+                                                                                         queryDesc->tupDesc,
                                                                                          NIL,
                                                                                          RELKIND_RELATION,
+                                                                                         into->rel->relpersistence,
+                                                                                         false,
                                                                                          false,
                                                                                          true,
                                                                                          0,
                                                                                          into->onCommit,
                                                                                          reloptions,
+                                                                                         true,
                                                                                          allowSystemTableMods);
-
-       FreeTupleDesc(tupdesc);
+       Assert(intoRelationId != InvalidOid);
 
        /*
         * Advance command counter so that the newly-created relation's catalog
@@ -2660,29 +2500,41 @@ OpenIntoRel(QueryDesc *queryDesc)
         * AlterTableCreateToastTable ends with CommandCounterIncrement(), so that
         * the TOAST table will be visible for insertion.
         */
-       AlterTableCreateToastTable(intoRelationId);
+       reloptions = transformRelOptions((Datum) 0,
+                                                                        into->options,
+                                                                        "toast",
+                                                                        validnsps,
+                                                                        true,
+                                                                        false);
 
-       /*
-        * And open the constructed table for writing.
-        */
-       intoRelationDesc = heap_open(intoRelationId, AccessExclusiveLock);
+       (void) heap_reloptions(RELKIND_TOASTVALUE, reloptions, true);
 
-       /* use_wal off requires rd_targblock be initially invalid */
-       Assert(intoRelationDesc->rd_targblock == InvalidBlockNumber);
+       AlterTableCreateToastTable(intoRelationId, reloptions);
 
        /*
-        * We can skip WAL-logging the insertions, unless PITR is in use.
+        * And open the constructed table for writing.
         */
-       estate->es_into_relation_use_wal = XLogArchivingActive();
-       estate->es_into_relation_descriptor = intoRelationDesc;
+       intoRelationDesc = heap_open(intoRelationId, AccessExclusiveLock);
 
        /*
         * Now replace the query's DestReceiver with one for SELECT INTO
         */
-       queryDesc->dest = CreateDestReceiver(DestIntoRel, NULL);
+       queryDesc->dest = CreateDestReceiver(DestIntoRel);
        myState = (DR_intorel *) queryDesc->dest;
        Assert(myState->pub.mydest == DestIntoRel);
        myState->estate = estate;
+       myState->rel = intoRelationDesc;
+
+       /*
+        * We can skip WAL-logging the insertions, unless PITR or streaming
+        * replication is in use. We can skip the FSM in any case.
+        */
+       myState->hi_options = HEAP_INSERT_SKIP_FSM |
+               (XLogIsNeeded() ? 0 : HEAP_INSERT_SKIP_WAL);
+       myState->bistate = GetBulkInsertState();
+
+       /* Not using WAL requires smgr_targblock be initially invalid */
+       Assert(RelationGetTargetBlock(intoRelationDesc) == InvalidBlockNumber);
 }
 
 /*
@@ -2691,33 +2543,31 @@ OpenIntoRel(QueryDesc *queryDesc)
 static void
 CloseIntoRel(QueryDesc *queryDesc)
 {
-       EState     *estate = queryDesc->estate;
+       DR_intorel *myState = (DR_intorel *) queryDesc->dest;
 
        /* OpenIntoRel might never have gotten called */
-       if (estate->es_into_relation_descriptor)
+       if (myState && myState->pub.mydest == DestIntoRel && myState->rel)
        {
+               FreeBulkInsertState(myState->bistate);
+
                /* If we skipped using WAL, must heap_sync before commit */
-               if (!estate->es_into_relation_use_wal)
-                       heap_sync(estate->es_into_relation_descriptor);
+               if (myState->hi_options & HEAP_INSERT_SKIP_WAL)
+                       heap_sync(myState->rel);
 
                /* close rel, but keep lock until commit */
-               heap_close(estate->es_into_relation_descriptor, NoLock);
+               heap_close(myState->rel, NoLock);
 
-               estate->es_into_relation_descriptor = NULL;
+               myState->rel = NULL;
        }
 }
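
The two routines above cooperate on one invariant, restated here as a compact sketch (variable names assumed): inserts that bypass WAL are safe only because the relation is brand new, and they must be flushed with heap_sync before the creating transaction commits.

    int         hi_options = HEAP_INSERT_SKIP_FSM;      /* new rel: FSM never helps */

    if (!XLogIsNeeded())
        hi_options |= HEAP_INSERT_SKIP_WAL;              /* no PITR/replication: skip WAL */

    /* ... heap_insert(rel, tuple, cid, hi_options, bistate) for each row ... */

    if (hi_options & HEAP_INSERT_SKIP_WAL)
        heap_sync(rel);                                  /* must reach disk before commit */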
 
 /*
  * CreateIntoRelDestReceiver -- create a suitable DestReceiver object
- *
- * Since CreateDestReceiver doesn't accept the parameters we'd need,
- * we just leave the private fields empty here.  OpenIntoRel will
- * fill them in.
  */
 DestReceiver *
 CreateIntoRelDestReceiver(void)
 {
-       DR_intorel *self = (DR_intorel *) palloc(sizeof(DR_intorel));
+       DR_intorel *self = (DR_intorel *) palloc0(sizeof(DR_intorel));
 
        self->pub.receiveSlot = intorel_receive;
        self->pub.rStartup = intorel_startup;
@@ -2725,7 +2575,7 @@ CreateIntoRelDestReceiver(void)
        self->pub.rDestroy = intorel_destroy;
        self->pub.mydest = DestIntoRel;
 
-       self->estate = NULL;
+       /* private fields will be set by OpenIntoRel */
 
        return (DestReceiver *) self;
 }
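
For context, a hedged sketch of the generic DestReceiver call sequence the executor drives once this receiver is installed; the tupdesc and slot variables are assumptions, and the actual call sites live elsewhere in the executor.

    DestReceiver *dest = CreateIntoRelDestReceiver();

    dest->rStartup(dest, CMD_SELECT, tupdesc);  /* once, before any rows (-> intorel_startup) */
    dest->receiveSlot(slot, dest);              /* once per result row (-> intorel_receive) */
    dest->rShutdown(dest);                      /* once, after the last row */
    dest->rDestroy(dest);                       /* when the receiver is no longer needed */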
@@ -2746,22 +2596,27 @@ static void
 intorel_receive(TupleTableSlot *slot, DestReceiver *self)
 {
        DR_intorel *myState = (DR_intorel *) self;
-       EState     *estate = myState->estate;
        HeapTuple       tuple;
 
-       tuple = ExecCopySlotTuple(slot);
+       /*
+        * get the heap tuple out of the tuple table slot, making sure we have a
+        * writable copy
+        */
+       tuple = ExecMaterializeSlot(slot);
+
+       /*
+        * force assignment of new OID (see comments in ExecInsert)
+        */
+       if (myState->rel->rd_rel->relhasoids)
+               HeapTupleSetOid(tuple, InvalidOid);
 
-       heap_insert(estate->es_into_relation_descriptor,
+       heap_insert(myState->rel,
                                tuple,
-                               estate->es_output_cid,
-                               estate->es_into_relation_use_wal,
-                               false);                 /* never any point in using FSM */
+                               myState->estate->es_output_cid,
+                               myState->hi_options,
+                               myState->bistate);
 
        /* We know this is a newly created relation, so there are no indexes */
-
-       heap_freetuple(tuple);
-
-       IncrAppended();
 }
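
A brief, assumption-flagged contrast of the two slot-to-tuple calls, since the switch from ExecCopySlotTuple to ExecMaterializeSlot is what lets the old heap_freetuple go away: the materialized tuple is owned by the slot, while the copy is the caller's to free.

    HeapTuple   owned = ExecMaterializeSlot(slot);  /* owned by the slot; do not free */
    HeapTuple   mine  = ExecCopySlotTuple(slot);    /* caller-owned palloc'd copy */

    heap_freetuple(mine);                           /* required for the copy only */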
 
 /*