[pg-rex/syncrep.git] src/backend/executor/execMain.c @ commit 6e386d25292396e17c5bb981b85cc2cf21639c09
1 /*-------------------------------------------------------------------------
2  *
3  * execMain.c
4  *        top level executor interface routines
5  *
6  * INTERFACE ROUTINES
7  *      ExecutorStart()
8  *      ExecutorRun()
9  *      ExecutorEnd()
10  *
11  *      The old ExecutorMain() has been replaced by ExecutorStart(),
12  *      ExecutorRun() and ExecutorEnd()
13  *
14  *      These three procedures are the external interfaces to the executor.
15  *      In each case, the query descriptor is required as an argument.
16  *
17  *      ExecutorStart() must be called at the beginning of execution of any
18  *      query plan and ExecutorEnd() should always be called at the end of
19  *      execution of a plan.
20  *
21  *      ExecutorRun accepts direction and count arguments that specify whether
22  *      the plan is to be executed forwards or backwards, and for how many tuples.
23  *
24  * Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
25  * Portions Copyright (c) 1994, Regents of the University of California
26  *
27  *
28  * IDENTIFICATION
29  *        $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.235 2004/08/29 04:12:31 momjian Exp $
30  *
31  *-------------------------------------------------------------------------
32  */
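/* ----------------------------------------------------------------
 *		A minimal sketch of the expected calling sequence (hypothetical
 *		caller; in the real system the query descriptor is handed in by
 *		the traffic cop), assuming a QueryDesc already built by
 *		CreateQueryDesc inside an open transaction:
 *
 *			ExecutorStart(queryDesc, false, false);
 *			(void) ExecutorRun(queryDesc, ForwardScanDirection, 0L);
 *			ExecutorEnd(queryDesc);
 *
 *		count = 0 asks ExecutorRun to run the plan to completion; see
 *		ExecutorRun below for fetching partial results.
 * ----------------------------------------------------------------
 */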
33 #include "postgres.h"
34
35 #include "access/heapam.h"
36 #include "catalog/heap.h"
37 #include "catalog/namespace.h"
38 #include "commands/tablecmds.h"
39 #include "commands/trigger.h"
40 #include "executor/execdebug.h"
41 #include "executor/execdefs.h"
42 #include "miscadmin.h"
43 #include "optimizer/clauses.h"
44 #include "optimizer/var.h"
45 #include "parser/parsetree.h"
46 #include "utils/acl.h"
47 #include "utils/guc.h"
48 #include "utils/lsyscache.h"
49
50
51 typedef struct execRowMark
52 {
53         Relation        relation;
54         Index           rti;
55         char            resname[32];
56 } execRowMark;
57
58 typedef struct evalPlanQual
59 {
60         Index           rti;
61         EState     *estate;
62         PlanState  *planstate;
63         struct evalPlanQual *next;      /* stack of active PlanQual plans */
64         struct evalPlanQual *free;      /* list of free PlanQual plans */
65 } evalPlanQual;
66
67 /* decls for local routines only used within this module */
68 static void InitPlan(QueryDesc *queryDesc, bool explainOnly);
69 static void initResultRelInfo(ResultRelInfo *resultRelInfo,
70                                   Index resultRelationIndex,
71                                   List *rangeTable,
72                                   CmdType operation);
73 static TupleTableSlot *ExecutePlan(EState *estate, PlanState *planstate,
74                         CmdType operation,
75                         long numberTuples,
76                         ScanDirection direction,
77                         DestReceiver *dest);
78 static void ExecSelect(TupleTableSlot *slot,
79                    DestReceiver *dest,
80                    EState *estate);
81 static void ExecInsert(TupleTableSlot *slot, ItemPointer tupleid,
82                    EState *estate);
83 static void ExecDelete(TupleTableSlot *slot, ItemPointer tupleid,
84                    EState *estate);
85 static void ExecUpdate(TupleTableSlot *slot, ItemPointer tupleid,
86                    EState *estate);
87 static TupleTableSlot *EvalPlanQualNext(EState *estate);
88 static void EndEvalPlanQual(EState *estate);
89 static void ExecCheckRTEPerms(RangeTblEntry *rte);
90 static void ExecCheckXactReadOnly(Query *parsetree);
91 static void EvalPlanQualStart(evalPlanQual *epq, EState *estate,
92                                   evalPlanQual *priorepq);
93 static void EvalPlanQualStop(evalPlanQual *epq);
94
95 /* end of local decls */
96
97
98 /* ----------------------------------------------------------------
99  *              ExecutorStart
100  *
101  *              This routine must be called at the beginning of execution of any
102  *              query plan
103  *
104  * Takes a QueryDesc previously created by CreateQueryDesc (it's not really
105  * clear why we bother to separate the two functions, but...).  The tupDesc
106  * field of the QueryDesc is filled in to describe the tuples that will be
107  * returned, and the internal fields (estate and planstate) are set up.
108  *
109  * If useCurrentSnapshot is true, run the query with the latest available
110  * snapshot, instead of the normal QuerySnapshot.  Also, if it's an update
111  * or delete query, check that the rows to be updated or deleted would be
112  * visible to the normal QuerySnapshot.  (This is a special-case behavior
113  * needed for referential integrity updates in serializable transactions.
114  * We must check all currently-committed rows, but we want to throw a
115  * can't-serialize error if any rows that would need updates would not be
116  * visible under the normal serializable snapshot.)
117  *
118  * If explainOnly is true, we are not actually intending to run the plan,
119  * only to set up for EXPLAIN; so skip unwanted side-effects.
120  *
121  * NB: the CurrentMemoryContext when this is called will become the parent
122  * of the per-query context used for this Executor invocation.
123  * ----------------------------------------------------------------
124  */
125 void
126 ExecutorStart(QueryDesc *queryDesc, bool useCurrentSnapshot, bool explainOnly)
127 {
128         EState     *estate;
129         MemoryContext oldcontext;
130
131         /* sanity checks: queryDesc must not be started already */
132         Assert(queryDesc != NULL);
133         Assert(queryDesc->estate == NULL);
134
135         /*
136          * If the transaction is read-only, we need to check if any writes are
137          * planned to non-temporary tables.
138          */
139         if (XactReadOnly && !explainOnly)
140                 ExecCheckXactReadOnly(queryDesc->parsetree);
141
142         /*
143          * Build EState, switch into per-query memory context for startup.
144          */
145         estate = CreateExecutorState();
146         queryDesc->estate = estate;
147
148         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
149
150         /*
151          * Fill in parameters, if any, from queryDesc
152          */
153         estate->es_param_list_info = queryDesc->params;
154
155         if (queryDesc->plantree->nParamExec > 0)
156                 estate->es_param_exec_vals = (ParamExecData *)
157                         palloc0(queryDesc->plantree->nParamExec * sizeof(ParamExecData));
158
159         estate->es_instrument = queryDesc->doInstrument;
160
161         /*
162          * Make our own private copy of the current query snapshot data.
163          *
164          * This "freezes" our idea of which tuples are good and which are not for
165          * the life of this query, even if it outlives the current command and
166          * current snapshot.
167          */
168         if (useCurrentSnapshot)
169         {
170                 /* RI update/delete query --- must use an up-to-date snapshot */
171                 estate->es_snapshot = CopyCurrentSnapshot();
172                 /* crosscheck updates/deletes against transaction snapshot */
173                 estate->es_crosscheck_snapshot = CopyQuerySnapshot();
174         }
175         else
176         {
177                 /* normal query --- use query snapshot, no crosscheck */
178                 estate->es_snapshot = CopyQuerySnapshot();
179                 estate->es_crosscheck_snapshot = SnapshotAny;
180         }
181
182         /*
183          * Initialize the plan state tree
184          */
185         InitPlan(queryDesc, explainOnly);
186
187         MemoryContextSwitchTo(oldcontext);
188 }
189
190 /* ----------------------------------------------------------------
191  *              ExecutorRun
192  *
193  *              This is the main routine of the executor module. It accepts
194  *              the query descriptor from the traffic cop and executes the
195  *              query plan.
196  *
197  *              ExecutorStart must have been called already.
198  *
199  *              If direction is NoMovementScanDirection then nothing is done
200  *              except to start up/shut down the destination.  Otherwise,
201  *              we retrieve up to 'count' tuples in the specified direction.
202  *
203  *              Note: count = 0 is interpreted as no portal limit, i.e., run to
204  *              completion.
205  *
206  * ----------------------------------------------------------------
207  */
208 TupleTableSlot *
209 ExecutorRun(QueryDesc *queryDesc,
210                         ScanDirection direction, long count)
211 {
212         EState     *estate;
213         CmdType         operation;
214         DestReceiver *dest;
215         TupleTableSlot *result;
216         MemoryContext oldcontext;
217
218         /* sanity checks */
219         Assert(queryDesc != NULL);
220
221         estate = queryDesc->estate;
222
223         Assert(estate != NULL);
224
225         /*
226          * Switch into per-query memory context
227          */
228         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
229
230         /*
231          * extract information from the query descriptor (the operation and
232          * the destination receiver).
233          */
234         operation = queryDesc->operation;
235         dest = queryDesc->dest;
236
237         /*
238          * startup tuple receiver
239          */
240         estate->es_processed = 0;
241         estate->es_lastoid = InvalidOid;
242
243         (*dest->rStartup) (dest, operation, queryDesc->tupDesc);
244
245         /*
246          * run plan
247          */
248         if (direction == NoMovementScanDirection)
249                 result = NULL;
250         else
251                 result = ExecutePlan(estate,
252                                                          queryDesc->planstate,
253                                                          operation,
254                                                          count,
255                                                          direction,
256                                                          dest);
257
258         /*
259          * shutdown receiver
260          */
261         (*dest->rShutdown) (dest);
262
263         MemoryContextSwitchTo(oldcontext);
264
265         return result;
266 }
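#ifdef EXECUTOR_USAGE_SKETCH	/* hypothetical symbol -- illustrative sketch, never compiled */
/*
 * Minimal sketch of fetching a SELECT result in batches, assuming an
 * already-started queryDesc.  ExecutorRun returns NULL once the plan is
 * exhausted, so a caller can simply loop until that happens; the batch
 * size of 100 is arbitrary.
 */
static void
FetchInBatches(QueryDesc *queryDesc)
{
	while (ExecutorRun(queryDesc, ForwardScanDirection, 100L) != NULL)
	{
		/* each call delivered up to 100 tuples to queryDesc->dest */
	}
}
#endif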
267
268 /* ----------------------------------------------------------------
269  *              ExecutorEnd
270  *
271  *              This routine must be called at the end of execution of any
272  *              query plan
273  * ----------------------------------------------------------------
274  */
275 void
276 ExecutorEnd(QueryDesc *queryDesc)
277 {
278         EState     *estate;
279         MemoryContext oldcontext;
280
281         /* sanity checks */
282         Assert(queryDesc != NULL);
283
284         estate = queryDesc->estate;
285
286         Assert(estate != NULL);
287
288         /*
289          * Switch into per-query memory context to run ExecEndPlan
290          */
291         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
292
293         ExecEndPlan(queryDesc->planstate, estate);
294
295         /*
296          * Must switch out of context before destroying it
297          */
298         MemoryContextSwitchTo(oldcontext);
299
300         /*
301          * Release EState and per-query memory context.  This should release
302          * everything the executor has allocated.
303          */
304         FreeExecutorState(estate);
305
306         /* Reset queryDesc fields that no longer point to anything */
307         queryDesc->tupDesc = NULL;
308         queryDesc->estate = NULL;
309         queryDesc->planstate = NULL;
310 }
311
312 /* ----------------------------------------------------------------
313  *              ExecutorRewind
314  *
315  *              This routine may be called on an open queryDesc to rewind it
316  *              to the start.
317  * ----------------------------------------------------------------
318  */
319 void
320 ExecutorRewind(QueryDesc *queryDesc)
321 {
322         EState     *estate;
323         MemoryContext oldcontext;
324
325         /* sanity checks */
326         Assert(queryDesc != NULL);
327
328         estate = queryDesc->estate;
329
330         Assert(estate != NULL);
331
332         /* It's probably not sensible to rescan updating queries */
333         Assert(queryDesc->operation == CMD_SELECT);
334
335         /*
336          * Switch into per-query memory context
337          */
338         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
339
340         /*
341          * rescan plan
342          */
343         ExecReScan(queryDesc->planstate, NULL);
344
345         MemoryContextSwitchTo(oldcontext);
346 }
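#ifdef EXECUTOR_USAGE_SKETCH	/* hypothetical symbol -- illustrative sketch, never compiled */
/*
 * Minimal sketch of re-reading an open SELECT queryDesc from the start,
 * assuming ExecutorStart has been called and some of the result has
 * already been fetched.
 */
static void
RereadFromStart(QueryDesc *queryDesc)
{
	ExecutorRewind(queryDesc);	/* reposition the plan at its beginning */
	(void) ExecutorRun(queryDesc, ForwardScanDirection, 0L);	/* scan it all again */
}
#endif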
347
348
349 /*
350  * ExecCheckRTPerms
351  *              Check access permissions for all relations listed in a range table.
352  */
353 void
354 ExecCheckRTPerms(List *rangeTable)
355 {
356         ListCell   *l;
357
358         foreach(l, rangeTable)
359         {
360                 RangeTblEntry *rte = lfirst(l);
361
362                 ExecCheckRTEPerms(rte);
363         }
364 }
365
366 /*
367  * ExecCheckRTEPerms
368  *              Check access permissions for a single RTE.
369  */
370 static void
371 ExecCheckRTEPerms(RangeTblEntry *rte)
372 {
373         AclMode         requiredPerms;
374         Oid                     relOid;
375         AclId           userid;
376
377         /*
378          * If it's a subquery, recursively examine its rangetable.
379          */
380         if (rte->rtekind == RTE_SUBQUERY)
381         {
382                 ExecCheckRTPerms(rte->subquery->rtable);
383                 return;
384         }
385
386         /*
387          * Otherwise, only plain-relation RTEs need to be checked here.
388          * Function RTEs are checked by init_fcache when the function is
389          * prepared for execution. Join and special RTEs need no checks.
390          */
391         if (rte->rtekind != RTE_RELATION)
392                 return;
393
394         /*
395          * No work if requiredPerms is empty.
396          */
397         requiredPerms = rte->requiredPerms;
398         if (requiredPerms == 0)
399                 return;
400
401         relOid = rte->relid;
402
403         /*
404          * userid to check as: current user unless we have a setuid
405          * indication.
406          *
407          * Note: GetUserId() is presently fast enough that there's no harm in
408          * calling it separately for each RTE.  If that stops being true, we
409          * could call it once in ExecCheckRTPerms and pass the userid down
410          * from there.  But for now, no need for the extra clutter.
411          */
412         userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
413
414         /*
415          * We must have *all* the requiredPerms bits, so use aclmask not
416          * aclcheck.
417          */
418         if (pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL)
419                 != requiredPerms)
420                 aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
421                                            get_rel_name(relOid));
422 }
423
424 /*
425  * Check that the query does not imply any writes to non-temp tables.
426  */
427 static void
428 ExecCheckXactReadOnly(Query *parsetree)
429 {
430         ListCell   *l;
431
432         /*
433          * CREATE TABLE AS or SELECT INTO?
434          *
435          * XXX should we allow this if the destination is temp?
436          */
437         if (parsetree->into != NULL)
438                 goto fail;
439
440         /* Fail if write permissions are requested on any non-temp table */
441         foreach(l, parsetree->rtable)
442         {
443                 RangeTblEntry *rte = lfirst(l);
444
445                 if (rte->rtekind == RTE_SUBQUERY)
446                 {
447                         ExecCheckXactReadOnly(rte->subquery);
448                         continue;
449                 }
450
451                 if (rte->rtekind != RTE_RELATION)
452                         continue;
453
454                 if ((rte->requiredPerms & (~ACL_SELECT)) == 0)
455                         continue;
456
457                 if (isTempNamespace(get_rel_namespace(rte->relid)))
458                         continue;
459
460                 goto fail;
461         }
462
463         return;
464
465 fail:
466         ereport(ERROR,
467                         (errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION),
468                          errmsg("transaction is read-only")));
469 }
470
471
472 /* ----------------------------------------------------------------
473  *              InitPlan
474  *
475  *              Initializes the query plan: open files, allocate storage
476  *              and start up the rule manager
477  * ----------------------------------------------------------------
478  */
479 static void
480 InitPlan(QueryDesc *queryDesc, bool explainOnly)
481 {
482         CmdType         operation = queryDesc->operation;
483         Query      *parseTree = queryDesc->parsetree;
484         Plan       *plan = queryDesc->plantree;
485         EState     *estate = queryDesc->estate;
486         PlanState  *planstate;
487         List       *rangeTable;
488         Relation        intoRelationDesc;
489         bool            do_select_into;
490         TupleDesc       tupType;
491
492         /*
493          * Do permissions checks.  It's sufficient to examine the query's top
494          * rangetable here --- subplan RTEs will be checked during
495          * ExecInitSubPlan().
496          */
497         ExecCheckRTPerms(parseTree->rtable);
498
499         /*
500          * get information from query descriptor
501          */
502         rangeTable = parseTree->rtable;
503
504         /*
505          * initialize the node's execution state
506          */
507         estate->es_range_table = rangeTable;
508
509         /*
510          * if there is a result relation, initialize result relation stuff
511          */
512         if (parseTree->resultRelation != 0 && operation != CMD_SELECT)
513         {
514                 List       *resultRelations = parseTree->resultRelations;
515                 int                     numResultRelations;
516                 ResultRelInfo *resultRelInfos;
517
518                 if (resultRelations != NIL)
519                 {
520                         /*
521                          * Multiple result relations (due to inheritance)
522                          * parseTree->resultRelations identifies them all
523                          */
524                         ResultRelInfo   *resultRelInfo;
525                         ListCell                *l;
526
527                         numResultRelations = list_length(resultRelations);
528                         resultRelInfos = (ResultRelInfo *)
529                                 palloc(numResultRelations * sizeof(ResultRelInfo));
530                         resultRelInfo = resultRelInfos;
531                         foreach(l, resultRelations)
532                         {
533                                 initResultRelInfo(resultRelInfo,
534                                                                   lfirst_int(l),
535                                                                   rangeTable,
536                                                                   operation);
537                                 resultRelInfo++;
538                         }
539                 }
540                 else
541                 {
542                         /*
543                          * Single result relation identified by
544                          * parseTree->resultRelation
545                          */
546                         numResultRelations = 1;
547                         resultRelInfos = (ResultRelInfo *) palloc(sizeof(ResultRelInfo));
548                         initResultRelInfo(resultRelInfos,
549                                                           parseTree->resultRelation,
550                                                           rangeTable,
551                                                           operation);
552                 }
553
554                 estate->es_result_relations = resultRelInfos;
555                 estate->es_num_result_relations = numResultRelations;
556                 /* Initialize to first or only result rel */
557                 estate->es_result_relation_info = resultRelInfos;
558         }
559         else
560         {
561                 /*
562                  * if no result relation, then set state appropriately
563                  */
564                 estate->es_result_relations = NULL;
565                 estate->es_num_result_relations = 0;
566                 estate->es_result_relation_info = NULL;
567         }
568
569         /*
570          * Detect whether we're doing SELECT INTO.  If so, set the force_oids
571          * flag appropriately so that the plan tree will be initialized with
572          * the correct tuple descriptors.
573          */
574         do_select_into = false;
575
576         if (operation == CMD_SELECT && parseTree->into != NULL)
577         {
578                 do_select_into = true;
579                 estate->es_select_into = true;
580                 estate->es_into_oids = parseTree->intoHasOids;
581         }
582
583         /*
584          * Have to lock relations selected for update
585          */
586         estate->es_rowMark = NIL;
587         if (parseTree->rowMarks != NIL)
588         {
589                 ListCell   *l;
590
591                 foreach(l, parseTree->rowMarks)
592                 {
593                         Index           rti = lfirst_int(l);
594                         Oid                     relid = getrelid(rti, rangeTable);
595                         Relation        relation;
596                         execRowMark *erm;
597
598                         relation = heap_open(relid, RowShareLock);
599                         erm = (execRowMark *) palloc(sizeof(execRowMark));
600                         erm->relation = relation;
601                         erm->rti = rti;
602                         snprintf(erm->resname, sizeof(erm->resname), "ctid%u", rti);
603                         estate->es_rowMark = lappend(estate->es_rowMark, erm);
604                 }
605         }
606
607         /*
608          * initialize the executor "tuple" table.  We need slots for all the
609          * plan nodes, plus possibly output slots for the junkfilter(s). At
610          * this point we aren't sure if we need junkfilters, so just add slots
611          * for them unconditionally.
612          */
613         {
614                 int                     nSlots = ExecCountSlotsNode(plan);
615
616                 if (parseTree->resultRelations != NIL)
617                         nSlots += list_length(parseTree->resultRelations);
618                 else
619                         nSlots += 1;
620                 estate->es_tupleTable = ExecCreateTupleTable(nSlots);
621         }
622
623         /* mark EvalPlanQual not active */
624         estate->es_topPlan = plan;
625         estate->es_evalPlanQual = NULL;
626         estate->es_evTupleNull = NULL;
627         estate->es_evTuple = NULL;
628         estate->es_useEvalPlan = false;
629
630         /*
631          * initialize the private state information for all the nodes in the
632          * query tree.  This opens files, allocates storage and leaves us
633          * ready to start processing tuples.
634          */
635         planstate = ExecInitNode(plan, estate);
636
637         /*
638          * Get the tuple descriptor describing the type of tuples to return.
639          * (this is especially important if we are creating a relation with
640          * "SELECT INTO")
641          */
642         tupType = ExecGetResultType(planstate);
643
644         /*
645          * Initialize the junk filter if needed.  SELECT and INSERT queries
646          * need a filter if there are any junk attrs in the tlist.      INSERT and
647          * SELECT INTO also need a filter if the plan may return raw disk tuples
648          * (else heap_insert will be scribbling on the source relation!).
649          * UPDATE and DELETE always need a filter, since there's always a junk
650          * 'ctid' attribute present --- no need to look first.
651          */
652         {
653                 bool            junk_filter_needed = false;
654                 ListCell   *tlist;
655
656                 switch (operation)
657                 {
658                         case CMD_SELECT:
659                         case CMD_INSERT:
660                                 foreach(tlist, plan->targetlist)
661                                 {
662                                         TargetEntry *tle = (TargetEntry *) lfirst(tlist);
663
664                                         if (tle->resdom->resjunk)
665                                         {
666                                                 junk_filter_needed = true;
667                                                 break;
668                                         }
669                                 }
670                                 if (!junk_filter_needed &&
671                                         (operation == CMD_INSERT || do_select_into) &&
672                                         ExecMayReturnRawTuples(planstate))
673                                         junk_filter_needed = true;
674                                 break;
675                         case CMD_UPDATE:
676                         case CMD_DELETE:
677                                 junk_filter_needed = true;
678                                 break;
679                         default:
680                                 break;
681                 }
682
683                 if (junk_filter_needed)
684                 {
685                         /*
686                          * If there are multiple result relations, each one needs its
687                          * own junk filter.  Note this is only possible for
688                          * UPDATE/DELETE, so we can't be fooled by some needing a
689                          * filter and some not.
690                          */
691                         if (parseTree->resultRelations != NIL)
692                         {
693                                 PlanState **appendplans;
694                                 int                     as_nplans;
695                                 ResultRelInfo *resultRelInfo;
696                                 int                     i;
697
698                                 /* Top plan had better be an Append here. */
699                                 Assert(IsA(plan, Append));
700                                 Assert(((Append *) plan)->isTarget);
701                                 Assert(IsA(planstate, AppendState));
702                                 appendplans = ((AppendState *) planstate)->appendplans;
703                                 as_nplans = ((AppendState *) planstate)->as_nplans;
704                                 Assert(as_nplans == estate->es_num_result_relations);
705                                 resultRelInfo = estate->es_result_relations;
706                                 for (i = 0; i < as_nplans; i++)
707                                 {
708                                         PlanState  *subplan = appendplans[i];
709                                         JunkFilter *j;
710
711                                         j = ExecInitJunkFilter(subplan->plan->targetlist,
712                                                                                    ExecGetResultType(subplan),
713                                                           ExecAllocTableSlot(estate->es_tupleTable));
714                                         resultRelInfo->ri_junkFilter = j;
715                                         resultRelInfo++;
716                                 }
717
718                                 /*
719                                  * Set active junkfilter too; at this point ExecInitAppend
720                                  * has already selected an active result relation...
721                                  */
722                                 estate->es_junkFilter =
723                                         estate->es_result_relation_info->ri_junkFilter;
724                         }
725                         else
726                         {
727                                 /* Normal case with just one JunkFilter */
728                                 JunkFilter *j;
729
730                                 j = ExecInitJunkFilter(planstate->plan->targetlist,
731                                                                            tupType,
732                                                           ExecAllocTableSlot(estate->es_tupleTable));
733                                 estate->es_junkFilter = j;
734                                 if (estate->es_result_relation_info)
735                                         estate->es_result_relation_info->ri_junkFilter = j;
736
737                                 /* For SELECT, want to return the cleaned tuple type */
738                                 if (operation == CMD_SELECT)
739                                         tupType = j->jf_cleanTupType;
740                         }
741                 }
742                 else
743                         estate->es_junkFilter = NULL;
744         }
745
746         /*
747          * If doing SELECT INTO, initialize the "into" relation.  We must wait
748          * till now so we have the "clean" result tuple type to create the new
749          * table from.
750          *
751          * If EXPLAIN, skip creating the "into" relation.
752          */
753         intoRelationDesc = NULL;
754
755         if (do_select_into && !explainOnly)
756         {
757                 char       *intoName;
758                 Oid                     namespaceId;
759                 AclResult       aclresult;
760                 Oid                     intoRelationId;
761                 TupleDesc       tupdesc;
762
763                 /*
764                  * find namespace to create in, check permissions
765                  */
766                 intoName = parseTree->into->relname;
767                 namespaceId = RangeVarGetCreationNamespace(parseTree->into);
768
769                 aclresult = pg_namespace_aclcheck(namespaceId, GetUserId(),
770                                                                                   ACL_CREATE);
771                 if (aclresult != ACLCHECK_OK)
772                         aclcheck_error(aclresult, ACL_KIND_NAMESPACE,
773                                                    get_namespace_name(namespaceId));
774
775                 /*
776                  * have to copy tupType to get rid of constraints
777                  */
778                 tupdesc = CreateTupleDescCopy(tupType);
779
780                 intoRelationId = heap_create_with_catalog(intoName,
781                                                                                                   namespaceId,
782                                                                                                   InvalidOid,
783                                                                                                   tupdesc,
784                                                                                                   RELKIND_RELATION,
785                                                                                                   false,
786                                                                                                   true,
787                                                                                                   0,
788                                                                                                   ONCOMMIT_NOOP,
789                                                                                                   allowSystemTableMods);
790
791                 FreeTupleDesc(tupdesc);
792
793                 /*
794                  * Advance command counter so that the newly-created relation's
795                  * catalog tuples will be visible to heap_open.
796                  */
797                 CommandCounterIncrement();
798
799                 /*
800                  * If necessary, create a TOAST table for the into relation. Note
801                  * that AlterTableCreateToastTable ends with
802                  * CommandCounterIncrement(), so that the TOAST table will be
803                  * visible for insertion.
804                  */
805                 AlterTableCreateToastTable(intoRelationId, true);
806
807                 /*
808                  * And open the constructed table for writing.
809                  */
810                 intoRelationDesc = heap_open(intoRelationId, AccessExclusiveLock);
811         }
812
813         estate->es_into_relation_descriptor = intoRelationDesc;
814
815         queryDesc->tupDesc = tupType;
816         queryDesc->planstate = planstate;
817 }
818
819 /*
820  * Initialize ResultRelInfo data for one result relation
821  */
822 static void
823 initResultRelInfo(ResultRelInfo *resultRelInfo,
824                                   Index resultRelationIndex,
825                                   List *rangeTable,
826                                   CmdType operation)
827 {
828         Oid                     resultRelationOid;
829         Relation        resultRelationDesc;
830
831         resultRelationOid = getrelid(resultRelationIndex, rangeTable);
832         resultRelationDesc = heap_open(resultRelationOid, RowExclusiveLock);
833
834         switch (resultRelationDesc->rd_rel->relkind)
835         {
836                 case RELKIND_SEQUENCE:
837                         ereport(ERROR,
838                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
839                                          errmsg("cannot change sequence \"%s\"",
840                                                   RelationGetRelationName(resultRelationDesc))));
841                         break;
842                 case RELKIND_TOASTVALUE:
843                         ereport(ERROR,
844                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
845                                          errmsg("cannot change TOAST relation \"%s\"",
846                                                   RelationGetRelationName(resultRelationDesc))));
847                         break;
848                 case RELKIND_VIEW:
849                         ereport(ERROR,
850                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
851                                          errmsg("cannot change view \"%s\"",
852                                                   RelationGetRelationName(resultRelationDesc))));
853                         break;
854         }
855
856         MemSet(resultRelInfo, 0, sizeof(ResultRelInfo));
857         resultRelInfo->type = T_ResultRelInfo;
858         resultRelInfo->ri_RangeTableIndex = resultRelationIndex;
859         resultRelInfo->ri_RelationDesc = resultRelationDesc;
860         resultRelInfo->ri_NumIndices = 0;
861         resultRelInfo->ri_IndexRelationDescs = NULL;
862         resultRelInfo->ri_IndexRelationInfo = NULL;
863         /* make a copy so as not to depend on relcache info not changing... */
864         resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc);
865         resultRelInfo->ri_TrigFunctions = NULL;
866         resultRelInfo->ri_ConstraintExprs = NULL;
867         resultRelInfo->ri_junkFilter = NULL;
868
869         /*
870          * If there are indices on the result relation, open them and save
871          * descriptors in the result relation info, so that we can add new
872          * index entries for the tuples we add/update.  We need not do this
873          * for a DELETE, however, since deletion doesn't affect indexes.
874          */
875         if (resultRelationDesc->rd_rel->relhasindex &&
876                 operation != CMD_DELETE)
877                 ExecOpenIndices(resultRelInfo);
878 }
879
880 /*
881  *              ExecContextForcesOids
882  *
883  * This is pretty grotty: when doing INSERT, UPDATE, or SELECT INTO,
884  * we need to ensure that result tuples have space for an OID iff they are
885  * going to be stored into a relation that has OIDs.  In other contexts
886  * we are free to choose whether to leave space for OIDs in result tuples
887  * (we generally don't want to, but we do if a physical-tlist optimization
888  * is possible).  This routine checks the plan context and returns TRUE if the
889  * choice is forced, FALSE if the choice is not forced.  In the TRUE case,
890  * *hasoids is set to the required value.
891  *
892  * One reason this is ugly is that all plan nodes in the plan tree will emit
893  * tuples with space for an OID, though we really only need the topmost node
894  * to do so.  However, node types like Sort don't project new tuples but just
895  * return their inputs, and in those cases the requirement propagates down
896  * to the input node.  Eventually we might make this code smart enough to
897  * recognize how far down the requirement really goes, but for now we just
898  * make all plan nodes do the same thing if the top level forces the choice.
899  *
900  * We assume that estate->es_result_relation_info is already set up to
901  * describe the target relation.  Note that in an UPDATE that spans an
902  * inheritance tree, some of the target relations may have OIDs and some not.
903  * We have to make the decisions on a per-relation basis as we initialize
904  * each of the child plans of the topmost Append plan.
905  *
906  * SELECT INTO is even uglier, because we don't have the INTO relation's
907  * descriptor available when this code runs; we have to look aside at a
908  * flag set by InitPlan().
909  */
910 bool
911 ExecContextForcesOids(PlanState *planstate, bool *hasoids)
912 {
913         if (planstate->state->es_select_into)
914         {
915                 *hasoids = planstate->state->es_into_oids;
916                 return true;
917         }
918         else
919         {
920                 ResultRelInfo *ri = planstate->state->es_result_relation_info;
921
922                 if (ri != NULL)
923                 {
924                         Relation        rel = ri->ri_RelationDesc;
925
926                         if (rel != NULL)
927                         {
928                                 *hasoids = rel->rd_rel->relhasoids;
929                                 return true;
930                         }
931                 }
932         }
933
934         return false;
935 }
936
937 /* ----------------------------------------------------------------
938  *              ExecEndPlan
939  *
940  *              Cleans up the query plan -- closes files and frees up storage
941  *
942  * NOTE: we are no longer very worried about freeing storage per se
943  * in this code; FreeExecutorState should be guaranteed to release all
944  * memory that needs to be released.  What we are worried about doing
945  * is closing relations and dropping buffer pins.  Thus, for example,
946  * tuple tables must be cleared or dropped to ensure pins are released.
947  * ----------------------------------------------------------------
948  */
949 void
950 ExecEndPlan(PlanState *planstate, EState *estate)
951 {
952         ResultRelInfo *resultRelInfo;
953         int                     i;
954         ListCell   *l;
955
956         /*
957          * shut down any PlanQual processing we were doing
958          */
959         if (estate->es_evalPlanQual != NULL)
960                 EndEvalPlanQual(estate);
961
962         /*
963          * shut down the node-type-specific query processing
964          */
965         ExecEndNode(planstate);
966
967         /*
968          * destroy the executor "tuple" table.
969          */
970         ExecDropTupleTable(estate->es_tupleTable, true);
971         estate->es_tupleTable = NULL;
972
973         /*
974          * close the result relation(s) if any, but hold locks until xact
975          * commit.
976          */
977         resultRelInfo = estate->es_result_relations;
978         for (i = estate->es_num_result_relations; i > 0; i--)
979         {
980                 /* Close indices and then the relation itself */
981                 ExecCloseIndices(resultRelInfo);
982                 heap_close(resultRelInfo->ri_RelationDesc, NoLock);
983                 resultRelInfo++;
984         }
985
986         /*
987          * close the "into" relation if necessary, again keeping lock
988          */
989         if (estate->es_into_relation_descriptor != NULL)
990                 heap_close(estate->es_into_relation_descriptor, NoLock);
991
992         /*
993          * close any relations selected FOR UPDATE, again keeping locks
994          */
995         foreach(l, estate->es_rowMark)
996         {
997                 execRowMark *erm = lfirst(l);
998
999                 heap_close(erm->relation, NoLock);
1000         }
1001 }
1002
1003 /* ----------------------------------------------------------------
1004  *              ExecutePlan
1005  *
1006  *              processes the query plan to retrieve 'numberTuples' tuples in the
1007  *              direction specified.
1008  *
1009  *              Retrieves all tuples if numberTuples is 0
1010  *
1011  *              result is either a slot containing the last tuple in the case
1012  *              of a SELECT or NULL otherwise.
1013  *
1014  * Note: the ctid attribute is a 'junk' attribute that is removed before the
1015  * user can see it
1016  * ----------------------------------------------------------------
1017  */
1018 static TupleTableSlot *
1019 ExecutePlan(EState *estate,
1020                         PlanState *planstate,
1021                         CmdType operation,
1022                         long numberTuples,
1023                         ScanDirection direction,
1024                         DestReceiver *dest)
1025 {
1026         JunkFilter *junkfilter;
1027         TupleTableSlot *slot;
1028         ItemPointer tupleid = NULL;
1029         ItemPointerData tuple_ctid;
1030         long            current_tuple_count;
1031         TupleTableSlot *result;
1032
1033         /*
1034          * initialize local variables
1035          */
1036         slot = NULL;
1037         current_tuple_count = 0;
1038         result = NULL;
1039
1040         /*
1041          * Set the direction.
1042          */
1043         estate->es_direction = direction;
1044
1045         /*
1046          * Process BEFORE EACH STATEMENT triggers
1047          */
1048         switch (operation)
1049         {
1050                 case CMD_UPDATE:
1051                         ExecBSUpdateTriggers(estate, estate->es_result_relation_info);
1052                         break;
1053                 case CMD_DELETE:
1054                         ExecBSDeleteTriggers(estate, estate->es_result_relation_info);
1055                         break;
1056                 case CMD_INSERT:
1057                         ExecBSInsertTriggers(estate, estate->es_result_relation_info);
1058                         break;
1059                 default:
1060                         /* do nothing */
1061                         break;
1062         }
1063
1064         /*
1065          * Loop until we've processed the proper number of tuples from the
1066          * plan.
1067          */
1068
1069         for (;;)
1070         {
1071                 /* Reset the per-output-tuple exprcontext */
1072                 ResetPerTupleExprContext(estate);
1073
1074                 /*
1075                  * Execute the plan and obtain a tuple
1076                  */
1077 lnext:  ;
1078                 if (estate->es_useEvalPlan)
1079                 {
1080                         slot = EvalPlanQualNext(estate);
1081                         if (TupIsNull(slot))
1082                                 slot = ExecProcNode(planstate);
1083                 }
1084                 else
1085                         slot = ExecProcNode(planstate);
1086
1087                 /*
1088                  * if the tuple is null, then we assume there is nothing more to
1089                  * process so we just return null...
1090                  */
1091                 if (TupIsNull(slot))
1092                 {
1093                         result = NULL;
1094                         break;
1095                 }
1096
1097                 /*
1098                  * if we have a junk filter, then project a new tuple with the
1099                  * junk removed.
1100                  *
1101                  * Store this new "clean" tuple in the junkfilter's resultSlot.
1102                  * (Formerly, we stored it back over the "dirty" tuple, which is
1103                  * WRONG because that tuple slot has the wrong descriptor.)
1104                  *
1105                  * Also, extract all the junk information we need.
1106                  */
1107                 if ((junkfilter = estate->es_junkFilter) != NULL)
1108                 {
1109                         Datum           datum;
1110                         HeapTuple       newTuple;
1111                         bool            isNull;
1112
1113                         /*
1114                          * extract the 'ctid' junk attribute.
1115                          */
1116                         if (operation == CMD_UPDATE || operation == CMD_DELETE)
1117                         {
1118                                 if (!ExecGetJunkAttribute(junkfilter,
1119                                                                                   slot,
1120                                                                                   "ctid",
1121                                                                                   &datum,
1122                                                                                   &isNull))
1123                                         elog(ERROR, "could not find junk ctid column");
1124
1125                                 /* shouldn't ever get a null result... */
1126                                 if (isNull)
1127                                         elog(ERROR, "ctid is NULL");
1128
1129                                 tupleid = (ItemPointer) DatumGetPointer(datum);
1130                                 tuple_ctid = *tupleid;  /* make sure we don't free the
1131                                                                                  * ctid!! */
1132                                 tupleid = &tuple_ctid;
1133                         }
1134                         else if (estate->es_rowMark != NIL)
1135                         {
1136                                 ListCell   *l;
1137
1138                 lmark:  ;
1139                                 foreach(l, estate->es_rowMark)
1140                                 {
1141                                         execRowMark *erm = lfirst(l);
1142                                         Buffer          buffer;
1143                                         HeapTupleData tuple;
1144                                         TupleTableSlot *newSlot;
1145                                         int                     test;
1146
1147                                         if (!ExecGetJunkAttribute(junkfilter,
1148                                                                                           slot,
1149                                                                                           erm->resname,
1150                                                                                           &datum,
1151                                                                                           &isNull))
1152                                                 elog(ERROR, "could not find junk \"%s\" column",
1153                                                          erm->resname);
1154
1155                                         /* shouldn't ever get a null result... */
1156                                         if (isNull)
1157                                                 elog(ERROR, "\"%s\" is NULL", erm->resname);
1158
1159                                         tuple.t_self = *((ItemPointer) DatumGetPointer(datum));
1160                                         test = heap_mark4update(erm->relation, &tuple, &buffer,
1161                                                                                         estate->es_snapshot->curcid);
1162                                         ReleaseBuffer(buffer);
1163                                         switch (test)
1164                                         {
1165                                                 case HeapTupleSelfUpdated:
1166                                                         /* treat it as deleted; do not process */
1167                                                         goto lnext;
1168
1169                                                 case HeapTupleMayBeUpdated:
1170                                                         break;
1171
1172                                                 case HeapTupleUpdated:
1173                                                         if (IsXactIsoLevelSerializable)
1174                                                                 ereport(ERROR,
1175                                                                                 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1176                                                                                  errmsg("could not serialize access due to concurrent update")));
1177                                                         if (!(ItemPointerEquals(&(tuple.t_self),
1178                                                                   (ItemPointer) DatumGetPointer(datum))))
1179                                                         {
1180                                                                 newSlot = EvalPlanQual(estate, erm->rti, &(tuple.t_self));
1181                                                                 if (!(TupIsNull(newSlot)))
1182                                                                 {
1183                                                                         slot = newSlot;
1184                                                                         estate->es_useEvalPlan = true;
1185                                                                         goto lmark;
1186                                                                 }
1187                                                         }
1188
1189                                                         /*
1190                                                          * if tuple was deleted or PlanQual failed for
1191                                                          * updated tuple - we must not return this
1192                                                          * tuple!
1193                                                          */
1194                                                         goto lnext;
1195
1196                                                 default:
1197                                                         elog(ERROR, "unrecognized heap_mark4update status: %u",
1198                                                                  test);
1199                                                         return (NULL);
1200                                         }
1201                                 }
1202                         }
1203
1204                         /*
1205                          * Finally create a new "clean" tuple with all junk attributes
1206                          * removed
1207                          */
1208                         newTuple = ExecRemoveJunk(junkfilter, slot);
1209
1210                         slot = ExecStoreTuple(newTuple,         /* tuple to store */
1211                                                                   junkfilter->jf_resultSlot,    /* dest slot */
1212                                                                   InvalidBuffer,                /* this tuple has no
1213                                                                                                                  * buffer */
1214                                                                   true);                /* tuple should be pfreed */
1215                 }
1216
1217                 /*
1218                  * now that we have a tuple, do the appropriate thing with it..
1219                  * either return it to the user, add it to a relation someplace,
1220                  * delete it from a relation, or modify some of its attributes.
1221                  */
1222                 switch (operation)
1223                 {
1224                         case CMD_SELECT:
1225                                 ExecSelect(slot,        /* slot containing tuple */
1226                                                    dest,        /* destination's tuple-receiver obj */
1227                                                    estate);
1228                                 result = slot;
1229                                 break;
1230
1231                         case CMD_INSERT:
1232                                 ExecInsert(slot, tupleid, estate);
1233                                 result = NULL;
1234                                 break;
1235
1236                         case CMD_DELETE:
1237                                 ExecDelete(slot, tupleid, estate);
1238                                 result = NULL;
1239                                 break;
1240
1241                         case CMD_UPDATE:
1242                                 ExecUpdate(slot, tupleid, estate);
1243                                 result = NULL;
1244                                 break;
1245
1246                         default:
1247                                 elog(ERROR, "unrecognized operation code: %d",
1248                                          (int) operation);
1249                                 result = NULL;
1250                                 break;
1251                 }
1252
1253                 /*
1254                  * check our tuple count.. if we've processed the proper number
1255                  * then quit, else loop again and process more tuples.  Zero
1256                  * numberTuples means no limit.
1257                  */
1258                 current_tuple_count++;
1259                 if (numberTuples && numberTuples == current_tuple_count)
1260                         break;
1261         }
1262
1263         /*
1264          * Process AFTER EACH STATEMENT triggers
1265          */
1266         switch (operation)
1267         {
1268                 case CMD_UPDATE:
1269                         ExecASUpdateTriggers(estate, estate->es_result_relation_info);
1270                         break;
1271                 case CMD_DELETE:
1272                         ExecASDeleteTriggers(estate, estate->es_result_relation_info);
1273                         break;
1274                 case CMD_INSERT:
1275                         ExecASInsertTriggers(estate, estate->es_result_relation_info);
1276                         break;
1277                 default:
1278                         /* do nothing */
1279                         break;
1280         }
1281
1282         /*
1283          * here, result is either a slot containing a tuple in the case of a
1284          * SELECT or NULL otherwise.
1285          */
1286         return result;
1287 }
1288
1289 /* ----------------------------------------------------------------
1290  *              ExecSelect
1291  *
1292  *              SELECTs are easy.. we just pass the tuple to the appropriate
1293  *              print function.  The only complexity is when we do a
1294  *              "SELECT INTO", in which case we insert the tuple into
1295  *              the appropriate relation (note: this is a newly created relation
1296  *              so we don't need to worry about indices or locks.)
1297  * ----------------------------------------------------------------
1298  */
1299 static void
1300 ExecSelect(TupleTableSlot *slot,
1301                    DestReceiver *dest,
1302                    EState *estate)
1303 {
1304         HeapTuple       tuple;
1305         TupleDesc       attrtype;
1306
1307         /*
1308          * get the heap tuple out of the tuple table slot
1309          */
1310         tuple = slot->val;
1311         attrtype = slot->ttc_tupleDescriptor;
1312
1313         /*
1314          * insert the tuple into the "into relation"
1315          *
1316          * XXX this probably ought to be replaced by a separate destination
1317          */
1318         if (estate->es_into_relation_descriptor != NULL)
1319         {
1320                 heap_insert(estate->es_into_relation_descriptor, tuple,
1321                                         estate->es_snapshot->curcid);
1322                 IncrAppended();
1323         }
1324
1325         /*
1326          * send the tuple to the destination
1327          */
1328         (*dest->receiveTuple) (tuple, attrtype, dest);
1329         IncrRetrieved();
1330         (estate->es_processed)++;
1331 }
1332
1333 /* ----------------------------------------------------------------
1334  *              ExecInsert
1335  *
1336  *              INSERTs are trickier.. we have to insert the tuple into
1337  *              the base relation and insert appropriate tuples into the
1338  *              index relations.
1339  * ----------------------------------------------------------------
1340  */
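/*
 * Processing order, as implemented below: fire BEFORE ROW INSERT triggers
 * (which may suppress or replace the tuple), check constraints, insert the
 * heap tuple, insert index entries for it, then fire AFTER ROW INSERT
 * triggers.
 */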
1341 static void
1342 ExecInsert(TupleTableSlot *slot,
1343                    ItemPointer tupleid,
1344                    EState *estate)
1345 {
1346         HeapTuple       tuple;
1347         ResultRelInfo *resultRelInfo;
1348         Relation        resultRelationDesc;
1349         int                     numIndices;
1350         Oid                     newId;
1351
1352         /*
1353          * get the heap tuple out of the tuple table slot
1354          */
1355         tuple = slot->val;
1356
1357         /*
1358          * get information on the (current) result relation
1359          */
1360         resultRelInfo = estate->es_result_relation_info;
1361         resultRelationDesc = resultRelInfo->ri_RelationDesc;
1362
1363         /* BEFORE ROW INSERT Triggers */
1364         if (resultRelInfo->ri_TrigDesc &&
1365           resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_INSERT] > 0)
1366         {
1367                 HeapTuple       newtuple;
1368
1369                 newtuple = ExecBRInsertTriggers(estate, resultRelInfo, tuple);
1370
1371                 if (newtuple == NULL)   /* "do nothing" */
1372                         return;
1373
1374                 if (newtuple != tuple)  /* modified by Trigger(s) */
1375                 {
1376                         /*
1377                          * Insert modified tuple into tuple table slot, replacing the
1378                          * original.  We assume that it was allocated in per-tuple
1379                          * memory context, and therefore will go away by itself. The
1380                          * tuple table slot should not try to clear it.
1381                          */
1382                         ExecStoreTuple(newtuple, slot, InvalidBuffer, false);
1383                         tuple = newtuple;
1384                 }
1385         }
1386
1387         /*
1388          * Check the constraints of the tuple
1389          */
1390         if (resultRelationDesc->rd_att->constr)
1391                 ExecConstraints(resultRelInfo, slot, estate);
1392
1393         /*
1394          * insert the tuple
1395          */
1396         newId = heap_insert(resultRelationDesc, tuple,
1397                                                 estate->es_snapshot->curcid);
1398
1399         IncrAppended();
1400         (estate->es_processed)++;
1401         estate->es_lastoid = newId;
1402         setLastTid(&(tuple->t_self));
1403
1404         /*
1405          * process indices
1406          *
1407          * Note: heap_insert adds a new tuple to a relation.  As a side effect,
1408          * the tupleid of the new tuple is placed in the new tuple's t_ctid
1409          * field.
1410          */
1411         numIndices = resultRelInfo->ri_NumIndices;
1412         if (numIndices > 0)
1413                 ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);
1414
1415         /* AFTER ROW INSERT Triggers */
1416         ExecARInsertTriggers(estate, resultRelInfo, tuple);
1417 }
1418
1419 /* ----------------------------------------------------------------
1420  *              ExecDelete
1421  *
1422  *              DELETE is like UPDATE, except that we only delete the
1423  *              tuple; its index entries are reclaimed later by VACUUM.
1424  * ----------------------------------------------------------------
1425  */
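/*
 * Processing order, as implemented below: fire BEFORE ROW DELETE triggers
 * (which may suppress the delete), delete the heap tuple -- retrying
 * against a newer row version via EvalPlanQual if a concurrent update
 * intervenes under READ COMMITTED -- then fire AFTER ROW DELETE triggers.
 */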
1426 static void
1427 ExecDelete(TupleTableSlot *slot,
1428                    ItemPointer tupleid,
1429                    EState *estate)
1430 {
1431         ResultRelInfo *resultRelInfo;
1432         Relation        resultRelationDesc;
1433         ItemPointerData ctid;
1434         int                     result;
1435
1436         /*
1437          * get information on the (current) result relation
1438          */
1439         resultRelInfo = estate->es_result_relation_info;
1440         resultRelationDesc = resultRelInfo->ri_RelationDesc;
1441
1442         /* BEFORE ROW DELETE Triggers */
1443         if (resultRelInfo->ri_TrigDesc &&
1444           resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_DELETE] > 0)
1445         {
1446                 bool            dodelete;
1447
1448                 dodelete = ExecBRDeleteTriggers(estate, resultRelInfo, tupleid,
1449                                                                                 estate->es_snapshot->curcid);
1450
1451                 if (!dodelete)                  /* "do nothing" */
1452                         return;
1453         }
1454
1455         /*
1456          * delete the tuple
1457          */
1458 ldelete:;
1459         result = heap_delete(resultRelationDesc, tupleid,
1460                                                  &ctid,
1461                                                  estate->es_snapshot->curcid,
1462                                                  estate->es_crosscheck_snapshot,
1463                                                  true /* wait for commit */);
1464         switch (result)
1465         {
1466                 case HeapTupleSelfUpdated:
1467                         /* already deleted by self; nothing to do */
1468                         return;
1469
1470                 case HeapTupleMayBeUpdated:
1471                         break;
1472
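                /*
                 * The tuple was concurrently updated by another transaction.
                 * Under SERIALIZABLE isolation that is an error; under READ
                 * COMMITTED we re-evaluate the query quals against the newest
                 * version of the row (EvalPlanQual) and, if it still
                 * qualifies, retry the delete against that version.
                 */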
1473                 case HeapTupleUpdated:
1474                         if (IsXactIsoLevelSerializable)
1475                                 ereport(ERROR,
1476                                                 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1477                                                  errmsg("could not serialize access due to concurrent update")));
1478                         else if (!(ItemPointerEquals(tupleid, &ctid)))
1479                         {
1480                                 TupleTableSlot *epqslot = EvalPlanQual(estate,
1481                                                            resultRelInfo->ri_RangeTableIndex, &ctid);
1482
1483                                 if (!TupIsNull(epqslot))
1484                                 {
1485                                         *tupleid = ctid;
1486                                         goto ldelete;
1487                                 }
1488                         }
1489                         /* tuple already deleted; nothing to do */
1490                         return;
1491
1492                 default:
1493                         elog(ERROR, "unrecognized heap_delete status: %u", result);
1494                         return;
1495         }
1496
1497         IncrDeleted();
1498         (estate->es_processed)++;
1499
1500         /*
1501          * Note: Normally one would think that we have to delete index tuples
1502          * associated with the heap tuple now..
1503          *
1504          * ... but in POSTGRES, we have no need to do this because the vacuum
1505          * daemon automatically opens an index scan and deletes index tuples
1506          * when it finds deleted heap tuples. -cim 9/27/89
1507          */
1508
1509         /* AFTER ROW DELETE Triggers */
1510         ExecARDeleteTriggers(estate, resultRelInfo, tupleid);
1511 }
1512
1513 /* ----------------------------------------------------------------
1514  *              ExecUpdate
1515  *
1516  *              note: we can't run UPDATE queries with transactions
1517  *              off, because UPDATEs are actually INSERTs and our
1518  *              scan would mistakenly loop forever, repeatedly updating
1519  *              the tuple it just inserted.  This should be fixed, but
1520  *              until it is we don't want to risk an infinite loop
1521  *              that corrupts the database.
1522  * ----------------------------------------------------------------
1523  */
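/*
 * Processing order, as implemented below: fire BEFORE ROW UPDATE triggers,
 * check constraints, replace the heap tuple with heap_update (looping back
 * through the constraint checks if EvalPlanQual supplies a newer row
 * version), insert index entries for the new tuple version, then fire
 * AFTER ROW UPDATE triggers.
 */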
1524 static void
1525 ExecUpdate(TupleTableSlot *slot,
1526                    ItemPointer tupleid,
1527                    EState *estate)
1528 {
1529         HeapTuple       tuple;
1530         ResultRelInfo *resultRelInfo;
1531         Relation        resultRelationDesc;
1532         ItemPointerData ctid;
1533         int                     result;
1534         int                     numIndices;
1535
1536         /*
1537          * abort the operation if not running transactions
1538          */
1539         if (IsBootstrapProcessingMode())
1540                 elog(ERROR, "cannot UPDATE during bootstrap");
1541
1542         /*
1543          * get the heap tuple out of the tuple table slot
1544          */
1545         tuple = slot->val;
1546
1547         /*
1548          * get information on the (current) result relation
1549          */
1550         resultRelInfo = estate->es_result_relation_info;
1551         resultRelationDesc = resultRelInfo->ri_RelationDesc;
1552
1553         /* BEFORE ROW UPDATE Triggers */
1554         if (resultRelInfo->ri_TrigDesc &&
1555           resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_UPDATE] > 0)
1556         {
1557                 HeapTuple       newtuple;
1558
1559                 newtuple = ExecBRUpdateTriggers(estate, resultRelInfo,
1560                                                                                 tupleid, tuple,
1561                                                                                 estate->es_snapshot->curcid);
1562
1563                 if (newtuple == NULL)   /* "do nothing" */
1564                         return;
1565
1566                 if (newtuple != tuple)  /* modified by Trigger(s) */
1567                 {
1568                         /*
1569                          * Insert modified tuple into tuple table slot, replacing the
1570                          * original.  We assume that it was allocated in per-tuple
1571                          * memory context, and therefore will go away by itself. The
1572                          * tuple table slot should not try to clear it.
1573                          */
1574                         ExecStoreTuple(newtuple, slot, InvalidBuffer, false);
1575                         tuple = newtuple;
1576                 }
1577         }
1578
1579         /*
1580          * Check the constraints of the tuple
1581          *
1582          * If we generate a new candidate tuple after EvalPlanQual testing, we
1583          * must loop back here and recheck constraints.  (We don't need to
1584          * redo triggers, however.      If there are any BEFORE triggers then
1585          * trigger.c will have done mark4update to lock the correct tuple, so
1586          * there's no need to do them again.)
1587          */
1588 lreplace:;
1589         if (resultRelationDesc->rd_att->constr)
1590                 ExecConstraints(resultRelInfo, slot, estate);
1591
1592         /*
1593          * replace the heap tuple
1594          */
1595         result = heap_update(resultRelationDesc, tupleid, tuple,
1596                                                  &ctid,
1597                                                  estate->es_snapshot->curcid,
1598                                                  estate->es_crosscheck_snapshot,
1599                                                  true /* wait for commit */);
1600         switch (result)
1601         {
1602                 case HeapTupleSelfUpdated:
1603                         /* already deleted by self; nothing to do */
1604                         return;
1605
1606                 case HeapTupleMayBeUpdated:
1607                         break;
1608
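                /*
                 * Concurrent update: same logic as in ExecDelete above --
                 * error out under SERIALIZABLE, otherwise recheck the newest
                 * row version via EvalPlanQual and retry the update there.
                 */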
1609                 case HeapTupleUpdated:
1610                         if (IsXactIsoLevelSerializable)
1611                                 ereport(ERROR,
1612                                                 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1613                                                  errmsg("could not serialize access due to concurrent update")));
1614                         else if (!(ItemPointerEquals(tupleid, &ctid)))
1615                         {
1616                                 TupleTableSlot *epqslot = EvalPlanQual(estate,
1617                                                            resultRelInfo->ri_RangeTableIndex, &ctid);
1618
1619                                 if (!TupIsNull(epqslot))
1620                                 {
1621                                         *tupleid = ctid;
1622                                         tuple = ExecRemoveJunk(estate->es_junkFilter, epqslot);
1623                                         slot = ExecStoreTuple(tuple,
1624                                                                         estate->es_junkFilter->jf_resultSlot,
1625                                                                                   InvalidBuffer, true);
1626                                         goto lreplace;
1627                                 }
1628                         }
1629                         /* tuple already deleted; nothing to do */
1630                         return;
1631
1632                 default:
1633                         elog(ERROR, "unrecognized heap_update status: %u", result);
1634                         return;
1635         }
1636
1637         IncrReplaced();
1638         (estate->es_processed)++;
1639
1640         /*
1641          * Note: instead of having to update the old index tuples associated
1642          * with the heap tuple, all we do is form and insert new index tuples.
1643          * This is because UPDATEs are actually DELETEs and INSERTs and index
1644          * tuple deletion is done automagically by the vacuum daemon. All we
1645          * do is insert new index tuples.  -cim 9/27/89
1646          */
1647
1648         /*
1649          * process indices
1650          *
1651          * heap_update updates a tuple in the base relation by invalidating it
1652          * and then inserting a new tuple to the relation.      As a side effect,
1653          * and then inserting a new tuple into the relation.  As a side effect,
1654          * field.  So we now insert index tuples using the new tupleid stored
1655          * there.
1656          */
1657
1658         numIndices = resultRelInfo->ri_NumIndices;
1659         if (numIndices > 0)
1660                 ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);
1661
1662         /* AFTER ROW UPDATE Triggers */
1663         ExecARUpdateTriggers(estate, resultRelInfo, tupleid, tuple);
1664 }
1665
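/*
 * ExecRelCheck --- evaluate the result relation's CHECK constraints against
 * the tuple in 'slot'.  Returns the name of the first failing constraint,
 * or NULL if all constraints pass (a NULL constraint result counts as a
 * pass, per SQL92).
 */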
1666 static const char *
1667 ExecRelCheck(ResultRelInfo *resultRelInfo,
1668                          TupleTableSlot *slot, EState *estate)
1669 {
1670         Relation        rel = resultRelInfo->ri_RelationDesc;
1671         int                     ncheck = rel->rd_att->constr->num_check;
1672         ConstrCheck *check = rel->rd_att->constr->check;
1673         ExprContext *econtext;
1674         MemoryContext oldContext;
1675         List       *qual;
1676         int                     i;
1677
1678         /*
1679          * If first time through for this result relation, build expression
1680          * nodetrees for rel's constraint expressions.  Keep them in the
1681          * per-query memory context so they'll survive throughout the query.
1682          */
1683         if (resultRelInfo->ri_ConstraintExprs == NULL)
1684         {
1685                 oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
1686                 resultRelInfo->ri_ConstraintExprs =
1687                         (List **) palloc(ncheck * sizeof(List *));
1688                 for (i = 0; i < ncheck; i++)
1689                 {
1690                         /* ExecQual wants implicit-AND form */
1691                         qual = make_ands_implicit(stringToNode(check[i].ccbin));
1692                         resultRelInfo->ri_ConstraintExprs[i] = (List *)
1693                                 ExecPrepareExpr((Expr *) qual, estate);
1694                 }
1695                 MemoryContextSwitchTo(oldContext);
1696         }
1697
1698         /*
1699          * We will use the EState's per-tuple context for evaluating
1700          * constraint expressions (creating it if it's not already there).
1701          */
1702         econtext = GetPerTupleExprContext(estate);
1703
1704         /* Arrange for econtext's scan tuple to be the tuple under test */
1705         econtext->ecxt_scantuple = slot;
1706
1707         /* And evaluate the constraints */
1708         for (i = 0; i < ncheck; i++)
1709         {
1710                 qual = resultRelInfo->ri_ConstraintExprs[i];
1711
1712                 /*
1713                  * NOTE: SQL92 specifies that a NULL result from a constraint
1714                  * expression is not to be treated as a failure.  Therefore, tell
1715                  * ExecQual to return TRUE for NULL.
1716                  */
1717                 if (!ExecQual(qual, econtext, true))
1718                         return check[i].ccname;
1719         }
1720
1721         /* NULL result means no error */
1722         return NULL;
1723 }
1724
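/*
 * ExecConstraints --- enforce NOT NULL and CHECK constraints for the tuple
 * in 'slot'; reports an error (ereport) on the first violation found.
 */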
1725 void
1726 ExecConstraints(ResultRelInfo *resultRelInfo,
1727                                 TupleTableSlot *slot, EState *estate)
1728 {
1729         Relation        rel = resultRelInfo->ri_RelationDesc;
1730         HeapTuple       tuple = slot->val;
1731         TupleConstr *constr = rel->rd_att->constr;
1732
1733         Assert(constr);
1734
1735         if (constr->has_not_null)
1736         {
1737                 int                     natts = rel->rd_att->natts;
1738                 int                     attrChk;
1739
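                /* attribute numbers are 1-based; the attrs[] array is 0-based */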
1740                 for (attrChk = 1; attrChk <= natts; attrChk++)
1741                 {
1742                         if (rel->rd_att->attrs[attrChk - 1]->attnotnull &&
1743                                 heap_attisnull(tuple, attrChk))
1744                                 ereport(ERROR,
1745                                                 (errcode(ERRCODE_NOT_NULL_VIOLATION),
1746                                                  errmsg("null value in column \"%s\" violates not-null constraint",
1747                                         NameStr(rel->rd_att->attrs[attrChk - 1]->attname))));
1748                 }
1749         }
1750
1751         if (constr->num_check > 0)
1752         {
1753                 const char *failed;
1754
1755                 if ((failed = ExecRelCheck(resultRelInfo, slot, estate)) != NULL)
1756                         ereport(ERROR,
1757                                         (errcode(ERRCODE_CHECK_VIOLATION),
1758                                          errmsg("new row for relation \"%s\" violates check constraint \"%s\"",
1759                                                         RelationGetRelationName(rel), failed)));
1760         }
1761 }
1762
1763 /*
1764  * Check a modified tuple to see if we want to process its updated version
1765  * under READ COMMITTED rules.
1766  *
1767  * See backend/executor/README for some info about how this works.
1768  */
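/*
 * For example, under READ COMMITTED an UPDATE that finds its target row
 * already updated by a concurrent, committed transaction fetches the
 * latest version of that row, re-evaluates the plan quals against it, and
 * (if the row still qualifies) applies the update to that version rather
 * than failing.
 */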
1769 TupleTableSlot *
1770 EvalPlanQual(EState *estate, Index rti, ItemPointer tid)
1771 {
1772         evalPlanQual *epq;
1773         EState     *epqstate;
1774         Relation        relation;
1775         HeapTupleData tuple;
1776         HeapTuple       copyTuple = NULL;
1777         bool            endNode;
1778
1779         Assert(rti != 0);
1780
1781         /*
1782          * find relation containing target tuple
1783          */
1784         if (estate->es_result_relation_info != NULL &&
1785                 estate->es_result_relation_info->ri_RangeTableIndex == rti)
1786                 relation = estate->es_result_relation_info->ri_RelationDesc;
1787         else
1788         {
1789                 ListCell   *l;
1790
1791                 relation = NULL;
1792                 foreach(l, estate->es_rowMark)
1793                 {
1794                         if (((execRowMark *) lfirst(l))->rti == rti)
1795                         {
1796                                 relation = ((execRowMark *) lfirst(l))->relation;
1797                                 break;
1798                         }
1799                 }
1800                 if (relation == NULL)
1801                         elog(ERROR, "could not find RowMark for RT index %u", rti);
1802         }
1803
1804         /*
1805          * fetch tid tuple
1806          *
1807          * Loop here to deal with updated or busy tuples
1808          */
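        /*
         * We fetch with SnapshotDirty so that we can see tuples being
         * changed by in-progress transactions: SnapshotDirty->xmax tells us
         * which updater to wait for, and when the fetch fails because the
         * row has been replaced, SnapshotDirty->tid points at the newer
         * version to chase.
         */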
1809         tuple.t_self = *tid;
1810         for (;;)
1811         {
1812                 Buffer          buffer;
1813
1814                 if (heap_fetch(relation, SnapshotDirty, &tuple, &buffer, false, NULL))
1815                 {
1816                         TransactionId xwait = SnapshotDirty->xmax;
1817
1818                         /* xmin should not be dirty... */
1819                         if (TransactionIdIsValid(SnapshotDirty->xmin))
1820                                 elog(ERROR, "t_xmin is uncommitted in tuple to be updated");
1821
1822                         /*
1823                          * If the tuple is being updated by another transaction, we
1824                          * have to wait for it to commit or abort.
1825                          */
1826                         if (TransactionIdIsValid(xwait))
1827                         {
1828                                 ReleaseBuffer(buffer);
1829                                 XactLockTableWait(xwait);
1830                                 continue;
1831                         }
1832
1833                         /*
1834                          * We got tuple - now copy it for use by recheck query.
1835                          */
1836                         copyTuple = heap_copytuple(&tuple);
1837                         ReleaseBuffer(buffer);
1838                         break;
1839                 }
1840
1841                 /*
1842                  * The fetch failed, so the tuple is invalid; check whether it was
1843                  * updated or deleted.  Note that SnapshotDirty->tid can be invalid if
1844                  * the tuple was updated by this transaction.  Should we check for that?
1845                  */
1846                 if (ItemPointerIsValid(&(SnapshotDirty->tid)) &&
1847                         !(ItemPointerEquals(&(tuple.t_self), &(SnapshotDirty->tid))))
1848                 {
1849                         /* updated, so look at the updated copy */
1850                         tuple.t_self = SnapshotDirty->tid;
1851                         continue;
1852                 }
1853
1854                 /*
1855                  * Deleted or updated by this transaction; forget it.
1856                  */
1857                 return NULL;
1858         }
1859
1860         /*
1861          * For UPDATE/DELETE we have to return the tid of the actual row
1862          * we're executing PQ for.
1863          */
1864         *tid = tuple.t_self;
1865
1866         /*
1867          * Need to run a recheck subquery.      Find or create a PQ stack entry.
1868          */
1869         epq = estate->es_evalPlanQual;
1870         endNode = true;
1871
1872         if (epq != NULL && epq->rti == 0)
1873         {
1874                 /* Top PQ stack entry is idle, so re-use it */
1875                 Assert(!(estate->es_useEvalPlan) && epq->next == NULL);
1876                 epq->rti = rti;
1877                 endNode = false;
1878         }
1879
1880         /*
1881          * If this is a request for another RTE, Ra, then we have to check
1882          * whether PlanQual was already requested for Ra.  If so, Ra's row
1883          * was updated again, and we have to restart the old execution for
1884          * Ra and forget everything we did after Ra was suspended.
1885          */
1886         if (epq != NULL && epq->rti != rti &&
1887                 epq->estate->es_evTuple[rti - 1] != NULL)
1888         {
1889                 do
1890                 {
1891                         evalPlanQual *oldepq;
1892
1893                         /* stop execution */
1894                         EvalPlanQualStop(epq);
1895                         /* pop previous PlanQual from the stack */
1896                         oldepq = epq->next;
1897                         Assert(oldepq && oldepq->rti != 0);
1898                         /* push current PQ to freePQ stack */
1899                         oldepq->free = epq;
1900                         epq = oldepq;
1901                         estate->es_evalPlanQual = epq;
1902                 } while (epq->rti != rti);
1903         }
1904
1905         /*
1906          * If we are requested for another RTE, we have to suspend
1907          * execution of the current PlanQual and start execution of the new one.
1908          */
1909         if (epq == NULL || epq->rti != rti)
1910         {
1911                 /* try to reuse plan used previously */
1912                 evalPlanQual *newepq = (epq != NULL) ? epq->free : NULL;
1913
1914                 if (newepq == NULL)             /* first call or freePQ stack is empty */
1915                 {
1916                         newepq = (evalPlanQual *) palloc0(sizeof(evalPlanQual));
1917                         newepq->free = NULL;
1918                         newepq->estate = NULL;
1919                         newepq->planstate = NULL;
1920                 }
1921                 else
1922                 {
1923                         /* recycle previously used PlanQual */
1924                         Assert(newepq->estate == NULL);
1925                         epq->free = NULL;
1926                 }
1927                 /* push current PQ to the stack */
1928                 newepq->next = epq;
1929                 epq = newepq;
1930                 estate->es_evalPlanQual = epq;
1931                 epq->rti = rti;
1932                 endNode = false;
1933         }
1934
1935         Assert(epq->rti == rti);
1936
1937         /*
1938          * Ok - we're requested for the same RTE.  Unfortunately we still have
1939          * to end and restart execution of the plan, because ExecReScan
1940          * wouldn't ensure that upper plan nodes would reset themselves.  We
1941          * could make that work if insertion of the target tuple were
1942          * integrated with the Param mechanism somehow, so that the upper plan
1943          * nodes know that their children's outputs have changed.
1944          *
1945          * Note that the stack of free evalPlanQual nodes is quite useless at the
1946          * moment, since it only saves us from pallocing/releasing the
1947          * evalPlanQual nodes themselves.  But it will be useful once we
1948          * implement ReScan instead of end/restart for re-using PlanQual
1949          * nodes.
1950          */
1951         if (endNode)
1952         {
1953                 /* stop execution */
1954                 EvalPlanQualStop(epq);
1955         }
1956
1957         /*
1958          * Initialize new recheck query.
1959          *
1960          * Note: if we were re-using PlanQual plans via ExecReScan, we'd need to
1961          * instead copy down changeable state from the top plan (including
1962          * es_result_relation_info, es_junkFilter) and reset locally
1963          * changeable state in the epq (including es_param_exec_vals,
1964          * es_evTupleNull).
1965          */
1966         EvalPlanQualStart(epq, estate, epq->next);
1967
1968         /*
1969          * free old RTE' tuple, if any, and store target tuple where
1970          * relation's scan node will see it
1971          */
1972         epqstate = epq->estate;
1973         if (epqstate->es_evTuple[rti - 1] != NULL)
1974                 heap_freetuple(epqstate->es_evTuple[rti - 1]);
1975         epqstate->es_evTuple[rti - 1] = copyTuple;
1976
1977         return EvalPlanQualNext(estate);
1978 }
1979
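/*
 * EvalPlanQualNext --- fetch the next tuple from the currently active
 * recheck plan.  When a recheck plan is exhausted, shut it down, pop it
 * off the PQ stack, and continue with the entry below; return NULL once
 * the stack is empty and normal query execution should resume.
 */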
1980 static TupleTableSlot *
1981 EvalPlanQualNext(EState *estate)
1982 {
1983         evalPlanQual *epq = estate->es_evalPlanQual;
1984         MemoryContext oldcontext;
1985         TupleTableSlot *slot;
1986
1987         Assert(epq->rti != 0);
1988
1989 lpqnext:;
1990         oldcontext = MemoryContextSwitchTo(epq->estate->es_query_cxt);
1991         slot = ExecProcNode(epq->planstate);
1992         MemoryContextSwitchTo(oldcontext);
1993
1994         /*
1995          * No more tuples for this PQ. Continue previous one.
1996          */
1997         if (TupIsNull(slot))
1998         {
1999                 evalPlanQual *oldepq;
2000
2001                 /* stop execution */
2002                 EvalPlanQualStop(epq);
2003                 /* pop old PQ from the stack */
2004                 oldepq = epq->next;
2005                 if (oldepq == NULL)
2006                 {
2007                         /* this is the first (oldest) PQ - mark as free */
2008                         epq->rti = 0;
2009                         estate->es_useEvalPlan = false;
2010                         /* and continue Query execution */
2011                         return (NULL);
2012                 }
2013                 Assert(oldepq->rti != 0);
2014                 /* push current PQ to freePQ stack */
2015                 oldepq->free = epq;
2016                 epq = oldepq;
2017                 estate->es_evalPlanQual = epq;
2018                 goto lpqnext;
2019         }
2020
2021         return (slot);
2022 }
2023
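/*
 * EndEvalPlanQual --- shut down any still-active recheck plans, unwinding
 * the whole PQ stack and leaving the topmost entry marked free (rti == 0).
 */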
2024 static void
2025 EndEvalPlanQual(EState *estate)
2026 {
2027         evalPlanQual *epq = estate->es_evalPlanQual;
2028
2029         if (epq->rti == 0)                      /* plans already shut down */
2030         {
2031                 Assert(epq->next == NULL);
2032                 return;
2033         }
2034
2035         for (;;)
2036         {
2037                 evalPlanQual *oldepq;
2038
2039                 /* stop execution */
2040                 EvalPlanQualStop(epq);
2041                 /* pop old PQ from the stack */
2042                 oldepq = epq->next;
2043                 if (oldepq == NULL)
2044                 {
2045                         /* this is the first (oldest) PQ - mark as free */
2046                         epq->rti = 0;
2047                         estate->es_useEvalPlan = false;
2048                         break;
2049                 }
2050                 Assert(oldepq->rti != 0);
2051                 /* push current PQ to freePQ stack */
2052                 oldepq->free = epq;
2053                 epq = oldepq;
2054                 estate->es_evalPlanQual = epq;
2055         }
2056 }
2057
2058 /*
2059  * Start execution of one level of PlanQual.
2060  *
2061  * This is a cut-down version of ExecutorStart(): we copy some state from
2062  * the top-level estate rather than initializing it fresh.
2063  */
2064 static void
2065 EvalPlanQualStart(evalPlanQual *epq, EState *estate, evalPlanQual *priorepq)
2066 {
2067         EState     *epqstate;
2068         int                     rtsize;
2069         MemoryContext oldcontext;
2070
2071         rtsize = list_length(estate->es_range_table);
2072
2073         epq->estate = epqstate = CreateExecutorState();
2074
2075         oldcontext = MemoryContextSwitchTo(epqstate->es_query_cxt);
2076
2077         /*
2078          * The epqstates share the top query's copy of unchanging state such
2079          * as the snapshot, rangetable, result-rel info, and external Param
2080          * info. They need their own copies of local state, including a tuple
2081          * table, es_param_exec_vals, etc.
2082          */
2083         epqstate->es_direction = ForwardScanDirection;
2084         epqstate->es_snapshot = estate->es_snapshot;
2085         epqstate->es_crosscheck_snapshot = estate->es_crosscheck_snapshot;
2086         epqstate->es_range_table = estate->es_range_table;
2087         epqstate->es_result_relations = estate->es_result_relations;
2088         epqstate->es_num_result_relations = estate->es_num_result_relations;
2089         epqstate->es_result_relation_info = estate->es_result_relation_info;
2090         epqstate->es_junkFilter = estate->es_junkFilter;
2091         epqstate->es_into_relation_descriptor = estate->es_into_relation_descriptor;
2092         epqstate->es_param_list_info = estate->es_param_list_info;
2093         if (estate->es_topPlan->nParamExec > 0)
2094                 epqstate->es_param_exec_vals = (ParamExecData *)
2095                         palloc0(estate->es_topPlan->nParamExec * sizeof(ParamExecData));
2096         epqstate->es_rowMark = estate->es_rowMark;
2097         epqstate->es_instrument = estate->es_instrument;
2098         epqstate->es_select_into = estate->es_select_into;
2099         epqstate->es_into_oids = estate->es_into_oids;
2100         epqstate->es_topPlan = estate->es_topPlan;
2101
2102         /*
2103          * Each epqstate must have its own es_evTupleNull state, but all the
2104          * stack entries share es_evTuple state.  This allows sub-rechecks to
2105          * inherit the value being examined by an outer recheck.
2106          */
2107         epqstate->es_evTupleNull = (bool *) palloc0(rtsize * sizeof(bool));
2108         if (priorepq == NULL)
2109                 /* first PQ stack entry */
2110                 epqstate->es_evTuple = (HeapTuple *)
2111                         palloc0(rtsize * sizeof(HeapTuple));
2112         else
2113                 /* later stack entries share the same storage */
2114                 epqstate->es_evTuple = priorepq->estate->es_evTuple;
2115
2116         epqstate->es_tupleTable =
2117                 ExecCreateTupleTable(estate->es_tupleTable->size);
2118
2119         epq->planstate = ExecInitNode(estate->es_topPlan, epqstate);
2120
2121         MemoryContextSwitchTo(oldcontext);
2122 }
2123
2124 /*
2125  * End execution of one level of PlanQual.
2126  *
2127  * This is a cut-down version of ExecutorEnd(); basically we want to do most
2128  * of the normal cleanup, but *not* close result relations (which we are
2129  * just sharing from the outer query).
2130  */
2131 static void
2132 EvalPlanQualStop(evalPlanQual *epq)
2133 {
2134         EState     *epqstate = epq->estate;
2135         MemoryContext oldcontext;
2136
2137         oldcontext = MemoryContextSwitchTo(epqstate->es_query_cxt);
2138
2139         ExecEndNode(epq->planstate);
2140
2141         ExecDropTupleTable(epqstate->es_tupleTable, true);
2142         epqstate->es_tupleTable = NULL;
2143
2144         if (epqstate->es_evTuple[epq->rti - 1] != NULL)
2145         {
2146                 heap_freetuple(epqstate->es_evTuple[epq->rti - 1]);
2147                 epqstate->es_evTuple[epq->rti - 1] = NULL;
2148         }
2149
2150         MemoryContextSwitchTo(oldcontext);
2151
2152         FreeExecutorState(epqstate);
2153
2154         epq->estate = NULL;
2155         epq->planstate = NULL;
2156 }