OSDN Git Service

Modify UPDATE/DELETE WHERE CURRENT OF to use the FOR UPDATE infrastructure to […]
[pg-rex/syncrep.git] / src / backend / executor / execMain.c
1 /*-------------------------------------------------------------------------
2  *
3  * execMain.c
4  *        top level executor interface routines
5  *
6  * INTERFACE ROUTINES
7  *      ExecutorStart()
8  *      ExecutorRun()
9  *      ExecutorEnd()
10  *
11  *      The old ExecutorMain() has been replaced by ExecutorStart(),
12  *      ExecutorRun() and ExecutorEnd()
13  *
14  *      These three procedures are the external interfaces to the executor.
15  *      In each case, the query descriptor is required as an argument.
16  *
17  *      ExecutorStart() must be called at the beginning of execution of any
18  *      query plan and ExecutorEnd() should always be called at the end of
19  *      execution of a plan.
20  *
21  *      ExecutorRun accepts direction and count arguments that specify whether
22  *      the plan is to be executed forwards, backwards, and for how many tuples.
23  *
24  * Portions Copyright (c) 1996-2008, PostgreSQL Global Development Group
25  * Portions Copyright (c) 1994, Regents of the University of California
26  *
27  *
28  * IDENTIFICATION
29  *        $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.317 2008/11/16 17:34:28 tgl Exp $
30  *
31  *-------------------------------------------------------------------------
32  */
33 #include "postgres.h"
34
35 #include "access/heapam.h"
36 #include "access/reloptions.h"
37 #include "access/transam.h"
38 #include "access/xact.h"
39 #include "catalog/heap.h"
40 #include "catalog/namespace.h"
41 #include "catalog/toasting.h"
42 #include "commands/tablespace.h"
43 #include "commands/trigger.h"
44 #include "executor/execdebug.h"
45 #include "executor/instrument.h"
46 #include "executor/nodeSubplan.h"
47 #include "miscadmin.h"
48 #include "nodes/nodeFuncs.h"
49 #include "optimizer/clauses.h"
50 #include "parser/parse_clause.h"
51 #include "parser/parsetree.h"
52 #include "storage/bufmgr.h"
53 #include "storage/lmgr.h"
54 #include "storage/smgr.h"
55 #include "utils/acl.h"
56 #include "utils/builtins.h"
57 #include "utils/lsyscache.h"
58 #include "utils/memutils.h"
59 #include "utils/snapmgr.h"
60 #include "utils/tqual.h"
61
62
63 /* Hook for plugins to get control in ExecutorRun() */
64 ExecutorRun_hook_type ExecutorRun_hook = NULL;
65
66 typedef struct evalPlanQual
67 {
68         Index           rti;
69         EState     *estate;
70         PlanState  *planstate;
71         struct evalPlanQual *next;      /* stack of active PlanQual plans */
72         struct evalPlanQual *free;      /* list of free PlanQual plans */
73 } evalPlanQual;
74
75 /* decls for local routines only used within this module */
76 static void InitPlan(QueryDesc *queryDesc, int eflags);
77 static void ExecCheckPlanOutput(Relation resultRel, List *targetList);
78 static void ExecEndPlan(PlanState *planstate, EState *estate);
79 static void ExecutePlan(EState *estate, PlanState *planstate,
80                         CmdType operation,
81                         long numberTuples,
82                         ScanDirection direction,
83                         DestReceiver *dest);
84 static void ExecSelect(TupleTableSlot *slot,
85                    DestReceiver *dest, EState *estate);
86 static void ExecInsert(TupleTableSlot *slot, ItemPointer tupleid,
87                    TupleTableSlot *planSlot,
88                    DestReceiver *dest, EState *estate);
89 static void ExecDelete(ItemPointer tupleid,
90                    TupleTableSlot *planSlot,
91                    DestReceiver *dest, EState *estate);
92 static void ExecUpdate(TupleTableSlot *slot, ItemPointer tupleid,
93                    TupleTableSlot *planSlot,
94                    DestReceiver *dest, EState *estate);
95 static void ExecProcessReturning(ProjectionInfo *projectReturning,
96                                          TupleTableSlot *tupleSlot,
97                                          TupleTableSlot *planSlot,
98                                          DestReceiver *dest);
99 static TupleTableSlot *EvalPlanQualNext(EState *estate);
100 static void EndEvalPlanQual(EState *estate);
101 static void ExecCheckRTPerms(List *rangeTable);
102 static void ExecCheckRTEPerms(RangeTblEntry *rte);
103 static void ExecCheckXactReadOnly(PlannedStmt *plannedstmt);
104 static void EvalPlanQualStart(evalPlanQual *epq, EState *estate,
105                                   evalPlanQual *priorepq);
106 static void EvalPlanQualStop(evalPlanQual *epq);
107 static void OpenIntoRel(QueryDesc *queryDesc);
108 static void CloseIntoRel(QueryDesc *queryDesc);
109 static void intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo);
110 static void intorel_receive(TupleTableSlot *slot, DestReceiver *self);
111 static void intorel_shutdown(DestReceiver *self);
112 static void intorel_destroy(DestReceiver *self);
113
114 /* end of local decls */
115
116
117 /* ----------------------------------------------------------------
118  *              ExecutorStart
119  *
120  *              This routine must be called at the beginning of any execution of any
121  *              query plan
122  *
123  * Takes a QueryDesc previously created by CreateQueryDesc (it's not real
124  * clear why we bother to separate the two functions, but...).  The tupDesc
125  * field of the QueryDesc is filled in to describe the tuples that will be
126  * returned, and the internal fields (estate and planstate) are set up.
127  *
128  * eflags contains flag bits as described in executor.h.
129  *
130  * NB: the CurrentMemoryContext when this is called will become the parent
131  * of the per-query context used for this Executor invocation.
132  * ----------------------------------------------------------------
133  */
134 void
135 ExecutorStart(QueryDesc *queryDesc, int eflags)
136 {
137         EState     *estate;
138         MemoryContext oldcontext;
139
140         /* sanity checks: queryDesc must not be started already */
141         Assert(queryDesc != NULL);
142         Assert(queryDesc->estate == NULL);
143
144         /*
145          * If the transaction is read-only, we need to check if any writes are
146          * planned to non-temporary tables.  EXPLAIN is considered read-only.
147          */
148         if (XactReadOnly && !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
149                 ExecCheckXactReadOnly(queryDesc->plannedstmt);
150
151         /*
152          * Build EState, switch into per-query memory context for startup.
153          */
154         estate = CreateExecutorState();
155         queryDesc->estate = estate;
156
157         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
158
159         /*
160          * Fill in parameters, if any, from queryDesc
161          */
162         estate->es_param_list_info = queryDesc->params;
163
164         if (queryDesc->plannedstmt->nParamExec > 0)
165                 estate->es_param_exec_vals = (ParamExecData *)
166                         palloc0(queryDesc->plannedstmt->nParamExec * sizeof(ParamExecData));
167
168         /*
169          * If non-read-only query, set the command ID to mark output tuples with
170          */
171         switch (queryDesc->operation)
172         {
173                 case CMD_SELECT:
174                         /* SELECT INTO and SELECT FOR UPDATE/SHARE need to mark tuples */
175                         if (queryDesc->plannedstmt->intoClause != NULL ||
176                                 queryDesc->plannedstmt->rowMarks != NIL)
177                                 estate->es_output_cid = GetCurrentCommandId(true);
178                         break;
179
180                 case CMD_INSERT:
181                 case CMD_DELETE:
182                 case CMD_UPDATE:
183                         estate->es_output_cid = GetCurrentCommandId(true);
184                         break;
185
186                 default:
187                         elog(ERROR, "unrecognized operation code: %d",
188                                  (int) queryDesc->operation);
189                         break;
190         }
191
192         /*
193          * Copy other important information into the EState
194          */
195         estate->es_snapshot = RegisterSnapshot(queryDesc->snapshot);
196         estate->es_crosscheck_snapshot = RegisterSnapshot(queryDesc->crosscheck_snapshot);
197         estate->es_instrument = queryDesc->doInstrument;
198
199         /*
200          * Initialize the plan state tree
201          */
202         InitPlan(queryDesc, eflags);
203
204         MemoryContextSwitchTo(oldcontext);
205 }
206
207 /* ----------------------------------------------------------------
208  *              ExecutorRun
209  *
210  *              This is the main routine of the executor module. It accepts
211  *              the query descriptor from the traffic cop and executes the
212  *              query plan.
213  *
214  *              ExecutorStart must have been called already.
215  *
216  *              If direction is NoMovementScanDirection then nothing is done
217  *              except to start up/shut down the destination.  Otherwise,
218  *              we retrieve up to 'count' tuples in the specified direction.
219  *
220  *              Note: count = 0 is interpreted as no portal limit, i.e., run to
221  *              completion.
222  *
223  *              There is no return value, but output tuples (if any) are sent to
224  *              the destination receiver specified in the QueryDesc; and the number
225  *              of tuples processed at the top level can be found in
226  *              estate->es_processed.
227  *
228  *              We provide a function hook variable that lets loadable plugins
229  *              get control when ExecutorRun is called.  Such a plugin would
230  *              normally call standard_ExecutorRun().
231  *
232  * ----------------------------------------------------------------
233  */
234 void
235 ExecutorRun(QueryDesc *queryDesc,
236                         ScanDirection direction, long count)
237 {
238         if (ExecutorRun_hook)
239                 (*ExecutorRun_hook) (queryDesc, direction, count);
240         else
241                 standard_ExecutorRun(queryDesc, direction, count);
242 }
243
244 void
245 standard_ExecutorRun(QueryDesc *queryDesc,
246                                          ScanDirection direction, long count)
247 {
248         EState     *estate;
249         CmdType         operation;
250         DestReceiver *dest;
251         bool            sendTuples;
252         MemoryContext oldcontext;
253
254         /* sanity checks */
255         Assert(queryDesc != NULL);
256
257         estate = queryDesc->estate;
258
259         Assert(estate != NULL);
260
261         /*
262          * Switch into per-query memory context
263          */
264         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
265
266         /*
267          * extract information from the query descriptor and the query feature.
268          */
269         operation = queryDesc->operation;
270         dest = queryDesc->dest;
271
272         /*
273          * startup tuple receiver, if we will be emitting tuples
274          */
275         estate->es_processed = 0;
276         estate->es_lastoid = InvalidOid;
277
278         sendTuples = (operation == CMD_SELECT ||
279                                   queryDesc->plannedstmt->returningLists);
280
281         if (sendTuples)
282                 (*dest->rStartup) (dest, operation, queryDesc->tupDesc);
283
284         /*
285          * run plan
286          */
287         if (!ScanDirectionIsNoMovement(direction))
288                 ExecutePlan(estate,
289                                         queryDesc->planstate,
290                                         operation,
291                                         count,
292                                         direction,
293                                         dest);
294
295         /*
296          * shutdown tuple receiver, if we started it
297          */
298         if (sendTuples)
299                 (*dest->rShutdown) (dest);
300
301         MemoryContextSwitchTo(oldcontext);
302 }
303
304 /* ----------------------------------------------------------------
305  *              ExecutorEnd
306  *
307  *              This routine must be called at the end of execution of any
308  *              query plan
309  * ----------------------------------------------------------------
310  */
311 void
312 ExecutorEnd(QueryDesc *queryDesc)
313 {
314         EState     *estate;
315         MemoryContext oldcontext;
316
317         /* sanity checks */
318         Assert(queryDesc != NULL);
319
320         estate = queryDesc->estate;
321
322         Assert(estate != NULL);
323
324         /*
325          * Switch into per-query memory context to run ExecEndPlan
326          */
327         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
328
329         ExecEndPlan(queryDesc->planstate, estate);
330
331         /*
332          * Close the SELECT INTO relation if any
333          */
334         if (estate->es_select_into)
335                 CloseIntoRel(queryDesc);
336
337         /* do away with our snapshots */
338         UnregisterSnapshot(estate->es_snapshot);
339         UnregisterSnapshot(estate->es_crosscheck_snapshot);
340
341         /*
342          * Must switch out of context before destroying it
343          */
344         MemoryContextSwitchTo(oldcontext);
345
346         /*
347          * Release EState and per-query memory context.  This should release
348          * everything the executor has allocated.
349          */
350         FreeExecutorState(estate);
351
352         /* Reset queryDesc fields that no longer point to anything */
353         queryDesc->tupDesc = NULL;
354         queryDesc->estate = NULL;
355         queryDesc->planstate = NULL;
356 }
357
358 /* ----------------------------------------------------------------
359  *              ExecutorRewind
360  *
361  *              This routine may be called on an open queryDesc to rewind it
362  *              to the start.
363  * ----------------------------------------------------------------
364  */
365 void
366 ExecutorRewind(QueryDesc *queryDesc)
367 {
368         EState     *estate;
369         MemoryContext oldcontext;
370
371         /* sanity checks */
372         Assert(queryDesc != NULL);
373
374         estate = queryDesc->estate;
375
376         Assert(estate != NULL);
377
378         /* It's probably not sensible to rescan updating queries */
379         Assert(queryDesc->operation == CMD_SELECT);
380
381         /*
382          * Switch into per-query memory context
383          */
384         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
385
386         /*
387          * rescan plan
388          */
389         ExecReScan(queryDesc->planstate, NULL);
390
391         MemoryContextSwitchTo(oldcontext);
392 }
393
394
395 /*
396  * ExecCheckRTPerms
397  *              Check access permissions for all relations listed in a range table.
398  */
399 static void
400 ExecCheckRTPerms(List *rangeTable)
401 {
402         ListCell   *l;
403
404         foreach(l, rangeTable)
405         {
406                 ExecCheckRTEPerms((RangeTblEntry *) lfirst(l));
407         }
408 }
409
410 /*
411  * ExecCheckRTEPerms
412  *              Check access permissions for a single RTE.
413  */
414 static void
415 ExecCheckRTEPerms(RangeTblEntry *rte)
416 {
417         AclMode         requiredPerms;
418         Oid                     relOid;
419         Oid                     userid;
420
421         /*
422          * Only plain-relation RTEs need to be checked here.  Function RTEs are
423          * checked by init_fcache when the function is prepared for execution.
424          * Join, subquery, and special RTEs need no checks.
425          */
426         if (rte->rtekind != RTE_RELATION)
427                 return;
428
429         /*
430          * No work if requiredPerms is empty.
431          */
432         requiredPerms = rte->requiredPerms;
433         if (requiredPerms == 0)
434                 return;
435
436         relOid = rte->relid;
437
438         /*
439          * userid to check as: current user unless we have a setuid indication.
440          *
441          * Note: GetUserId() is presently fast enough that there's no harm in
442          * calling it separately for each RTE.  If that stops being true, we could
443          * call it once in ExecCheckRTPerms and pass the userid down from there.
444          * But for now, no need for the extra clutter.
445          */
446         userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
447
448         /*
449          * We must have *all* the requiredPerms bits, so use aclmask not aclcheck.
450          */
451         if (pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL)
452                 != requiredPerms)
453                 aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
454                                            get_rel_name(relOid));
455 }
456
457 /*
458  * Check that the query does not imply any writes to non-temp tables.
459  */
460 static void
461 ExecCheckXactReadOnly(PlannedStmt *plannedstmt)
462 {
463         ListCell   *l;
464
465         /*
466          * CREATE TABLE AS or SELECT INTO?
467          *
468          * XXX should we allow this if the destination is temp?
469          */
470         if (plannedstmt->intoClause != NULL)
471                 goto fail;
472
473         /* Fail if write permissions are requested on any non-temp table */
474         foreach(l, plannedstmt->rtable)
475         {
476                 RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
477
478                 if (rte->rtekind != RTE_RELATION)
479                         continue;
480
481                 if ((rte->requiredPerms & (~ACL_SELECT)) == 0)
482                         continue;
483
484                 if (isTempNamespace(get_rel_namespace(rte->relid)))
485                         continue;
486
487                 goto fail;
488         }
489
490         return;
491
492 fail:
493         ereport(ERROR,
494                         (errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION),
495                          errmsg("transaction is read-only")));
496 }
497
498
499 /* ----------------------------------------------------------------
500  *              InitPlan
501  *
502  *              Initializes the query plan: open files, allocate storage
503  *              and start up the rule manager
504  * ----------------------------------------------------------------
505  */
506 static void
507 InitPlan(QueryDesc *queryDesc, int eflags)
508 {
509         CmdType         operation = queryDesc->operation;
510         PlannedStmt *plannedstmt = queryDesc->plannedstmt;
511         Plan       *plan = plannedstmt->planTree;
512         List       *rangeTable = plannedstmt->rtable;
513         EState     *estate = queryDesc->estate;
514         PlanState  *planstate;
515         TupleDesc       tupType;
516         ListCell   *l;
517         int                     i;
518
519         /*
520          * Do permissions checks
521          */
522         ExecCheckRTPerms(rangeTable);
523
524         /*
525          * initialize the node's execution state
526          */
527         estate->es_range_table = rangeTable;
528
529         /*
530          * initialize result relation stuff
531          */
532         if (plannedstmt->resultRelations)
533         {
534                 List       *resultRelations = plannedstmt->resultRelations;
535                 int                     numResultRelations = list_length(resultRelations);
536                 ResultRelInfo *resultRelInfos;
537                 ResultRelInfo *resultRelInfo;
538
539                 resultRelInfos = (ResultRelInfo *)
540                         palloc(numResultRelations * sizeof(ResultRelInfo));
541                 resultRelInfo = resultRelInfos;
542                 foreach(l, resultRelations)
543                 {
544                         Index           resultRelationIndex = lfirst_int(l);
545                         Oid                     resultRelationOid;
546                         Relation        resultRelation;
547
548                         resultRelationOid = getrelid(resultRelationIndex, rangeTable);
549                         resultRelation = heap_open(resultRelationOid, RowExclusiveLock);
550                         InitResultRelInfo(resultRelInfo,
551                                                           resultRelation,
552                                                           resultRelationIndex,
553                                                           operation,
554                                                           estate->es_instrument);
555                         resultRelInfo++;
556                 }
557                 estate->es_result_relations = resultRelInfos;
558                 estate->es_num_result_relations = numResultRelations;
559                 /* Initialize to first or only result rel */
560                 estate->es_result_relation_info = resultRelInfos;
561         }
562         else
563         {
564                 /*
565                  * if no result relation, then set state appropriately
566                  */
567                 estate->es_result_relations = NULL;
568                 estate->es_num_result_relations = 0;
569                 estate->es_result_relation_info = NULL;
570         }
571
572         /*
573          * Detect whether we're doing SELECT INTO.  If so, set the es_into_oids
574          * flag appropriately so that the plan tree will be initialized with the
575          * correct tuple descriptors.  (Other SELECT INTO stuff comes later.)
576          */
577         estate->es_select_into = false;
578         if (operation == CMD_SELECT && plannedstmt->intoClause != NULL)
579         {
580                 estate->es_select_into = true;
581                 estate->es_into_oids = interpretOidsOption(plannedstmt->intoClause->options);
582         }
583
584         /*
585          * Have to lock relations selected FOR UPDATE/FOR SHARE before we
586          * initialize the plan tree, else we'd be doing a lock upgrade. While we
587          * are at it, build the ExecRowMark list.
588          */
589         estate->es_rowMarks = NIL;
590         foreach(l, plannedstmt->rowMarks)
591         {
592                 RowMarkClause *rc = (RowMarkClause *) lfirst(l);
593                 Oid                     relid;
594                 Relation        relation;
595                 ExecRowMark *erm;
596
597                 /* ignore "parent" rowmarks; they are irrelevant at runtime */
598                 if (rc->isParent)
599                         continue;
600
601                 relid = getrelid(rc->rti, rangeTable);
602                 relation = heap_open(relid, RowShareLock);
603                 erm = (ExecRowMark *) palloc(sizeof(ExecRowMark));
604                 erm->relation = relation;
605                 erm->rti = rc->rti;
606                 erm->prti = rc->prti;
607                 erm->forUpdate = rc->forUpdate;
608                 erm->noWait = rc->noWait;
609                 /* We'll locate the junk attrs below */
610                 erm->ctidAttNo = InvalidAttrNumber;
611                 erm->toidAttNo = InvalidAttrNumber;
612                 ItemPointerSetInvalid(&(erm->curCtid));
613                 estate->es_rowMarks = lappend(estate->es_rowMarks, erm);
614         }
615
616         /*
617          * Initialize the executor "tuple" table.  We need slots for all the plan
618          * nodes, plus possibly output slots for the junkfilter(s). At this point
619          * we aren't sure if we need junkfilters, so just add slots for them
620          * unconditionally.  Also, if it's not a SELECT, set up a slot for use for
621          * trigger output tuples.  Also, one for RETURNING-list evaluation.
622          */
623         {
624                 int                     nSlots;
625
626                 /* Slots for the main plan tree */
627                 nSlots = ExecCountSlotsNode(plan);
628                 /* Add slots for subplans and initplans */
629                 foreach(l, plannedstmt->subplans)
630                 {
631                         Plan       *subplan = (Plan *) lfirst(l);
632
633                         nSlots += ExecCountSlotsNode(subplan);
634                 }
635                 /* Add slots for junkfilter(s) */
636                 if (plannedstmt->resultRelations != NIL)
637                         nSlots += list_length(plannedstmt->resultRelations);
638                 else
639                         nSlots += 1;
640                 if (operation != CMD_SELECT)
641                         nSlots++;                       /* for es_trig_tuple_slot */
642                 if (plannedstmt->returningLists)
643                         nSlots++;                       /* for RETURNING projection */
644
645                 estate->es_tupleTable = ExecCreateTupleTable(nSlots);
646
647                 if (operation != CMD_SELECT)
648                         estate->es_trig_tuple_slot =
649                                 ExecAllocTableSlot(estate->es_tupleTable);
650         }
651
652         /* mark EvalPlanQual not active */
653         estate->es_plannedstmt = plannedstmt;
654         estate->es_evalPlanQual = NULL;
655         estate->es_evTupleNull = NULL;
656         estate->es_evTuple = NULL;
657         estate->es_useEvalPlan = false;
658
659         /*
660          * Initialize private state information for each SubPlan.  We must do this
661          * before running ExecInitNode on the main query tree, since
662          * ExecInitSubPlan expects to be able to find these entries.
663          */
664         Assert(estate->es_subplanstates == NIL);
665         i = 1;                                          /* subplan indices count from 1 */
666         foreach(l, plannedstmt->subplans)
667         {
668                 Plan       *subplan = (Plan *) lfirst(l);
669                 PlanState  *subplanstate;
670                 int                     sp_eflags;
671
672                 /*
673                  * A subplan will never need to do BACKWARD scan nor MARK/RESTORE. If
674                  * it is a parameterless subplan (not initplan), we suggest that it be
675                  * prepared to handle REWIND efficiently; otherwise there is no need.
676                  */
677                 sp_eflags = eflags & EXEC_FLAG_EXPLAIN_ONLY;
678                 if (bms_is_member(i, plannedstmt->rewindPlanIDs))
679                         sp_eflags |= EXEC_FLAG_REWIND;
680
681                 subplanstate = ExecInitNode(subplan, estate, sp_eflags);
682
683                 estate->es_subplanstates = lappend(estate->es_subplanstates,
684                                                                                    subplanstate);
685
686                 i++;
687         }
688
689         /*
690          * Initialize the private state information for all the nodes in the query
691          * tree.  This opens files, allocates storage and leaves us ready to start
692          * processing tuples.
693          */
694         planstate = ExecInitNode(plan, estate, eflags);
695
696         /*
697          * Get the tuple descriptor describing the type of tuples to return. (this
698          * is especially important if we are creating a relation with "SELECT
699          * INTO")
700          */
701         tupType = ExecGetResultType(planstate);
702
703         /*
704          * Initialize the junk filter if needed.  SELECT and INSERT queries need a
705          * filter if there are any junk attrs in the tlist.  UPDATE and
706          * DELETE always need a filter, since there's always a junk 'ctid'
707          * attribute present --- no need to look first.
708          *
709          * This section of code is also a convenient place to verify that the
710          * output of an INSERT or UPDATE matches the target table(s).
711          */
712         {
713                 bool            junk_filter_needed = false;
714                 ListCell   *tlist;
715
716                 switch (operation)
717                 {
718                         case CMD_SELECT:
719                         case CMD_INSERT:
720                                 foreach(tlist, plan->targetlist)
721                                 {
722                                         TargetEntry *tle = (TargetEntry *) lfirst(tlist);
723
724                                         if (tle->resjunk)
725                                         {
726                                                 junk_filter_needed = true;
727                                                 break;
728                                         }
729                                 }
730                                 break;
731                         case CMD_UPDATE:
732                         case CMD_DELETE:
733                                 junk_filter_needed = true;
734                                 break;
735                         default:
736                                 break;
737                 }
738
739                 if (junk_filter_needed)
740                 {
741                         /*
742                          * If there are multiple result relations, each one needs its own
743                          * junk filter.  Note this is only possible for UPDATE/DELETE, so
744                          * we can't be fooled by some needing a filter and some not.
745                          */
746                         if (list_length(plannedstmt->resultRelations) > 1)
747                         {
748                                 PlanState **appendplans;
749                                 int                     as_nplans;
750                                 ResultRelInfo *resultRelInfo;
751
752                                 /* Top plan had better be an Append here. */
753                                 Assert(IsA(plan, Append));
754                                 Assert(((Append *) plan)->isTarget);
755                                 Assert(IsA(planstate, AppendState));
756                                 appendplans = ((AppendState *) planstate)->appendplans;
757                                 as_nplans = ((AppendState *) planstate)->as_nplans;
758                                 Assert(as_nplans == estate->es_num_result_relations);
759                                 resultRelInfo = estate->es_result_relations;
760                                 for (i = 0; i < as_nplans; i++)
761                                 {
762                                         PlanState  *subplan = appendplans[i];
763                                         JunkFilter *j;
764
765                                         if (operation == CMD_UPDATE)
766                                                 ExecCheckPlanOutput(resultRelInfo->ri_RelationDesc,
767                                                                                         subplan->plan->targetlist);
768
769                                         j = ExecInitJunkFilter(subplan->plan->targetlist,
770                                                         resultRelInfo->ri_RelationDesc->rd_att->tdhasoid,
771                                                                   ExecAllocTableSlot(estate->es_tupleTable));
772
773                                         /*
774                                          * Since it must be UPDATE/DELETE, there had better be a
775                                          * "ctid" junk attribute in the tlist ... but ctid could
776                                          * be at a different resno for each result relation. We
777                                          * look up the ctid resnos now and save them in the
778                                          * junkfilters.
779                                          */
780                                         j->jf_junkAttNo = ExecFindJunkAttribute(j, "ctid");
781                                         if (!AttributeNumberIsValid(j->jf_junkAttNo))
782                                                 elog(ERROR, "could not find junk ctid column");
783                                         resultRelInfo->ri_junkFilter = j;
784                                         resultRelInfo++;
785                                 }
786
787                                 /*
788                                  * Set active junkfilter too; at this point ExecInitAppend has
789                                  * already selected an active result relation...
790                                  */
791                                 estate->es_junkFilter =
792                                         estate->es_result_relation_info->ri_junkFilter;
793
794                                 /*
795                                  * We currently can't support rowmarks in this case, because
796                                  * the associated junk CTIDs might have different resnos in
797                                  * different subplans.
798                                  */
799                                 if (estate->es_rowMarks)
800                                         ereport(ERROR,
801                                                         (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
802                                                          errmsg("SELECT FOR UPDATE/SHARE is not supported within a query with multiple result relations")));
803                         }
804                         else
805                         {
806                                 /* Normal case with just one JunkFilter */
807                                 JunkFilter *j;
808
809                                 if (operation == CMD_INSERT || operation == CMD_UPDATE)
810                                         ExecCheckPlanOutput(estate->es_result_relation_info->ri_RelationDesc,
811                                                                                 planstate->plan->targetlist);
812
813                                 j = ExecInitJunkFilter(planstate->plan->targetlist,
814                                                                            tupType->tdhasoid,
815                                                                   ExecAllocTableSlot(estate->es_tupleTable));
816                                 estate->es_junkFilter = j;
817                                 if (estate->es_result_relation_info)
818                                         estate->es_result_relation_info->ri_junkFilter = j;
819
820                                 if (operation == CMD_SELECT)
821                                 {
822                                         /* For SELECT, want to return the cleaned tuple type */
823                                         tupType = j->jf_cleanTupType;
824                                 }
825                                 else if (operation == CMD_UPDATE || operation == CMD_DELETE)
826                                 {
827                                         /* For UPDATE/DELETE, find the ctid junk attr now */
828                                         j->jf_junkAttNo = ExecFindJunkAttribute(j, "ctid");
829                                         if (!AttributeNumberIsValid(j->jf_junkAttNo))
830                                                 elog(ERROR, "could not find junk ctid column");
831                                 }
832
833                                 /* For SELECT FOR UPDATE/SHARE, find the junk attrs now */
834                                 foreach(l, estate->es_rowMarks)
835                                 {
836                                         ExecRowMark *erm = (ExecRowMark *) lfirst(l);
837                                         char            resname[32];
838
839                                         /* always need the ctid */
840                                         snprintf(resname, sizeof(resname), "ctid%u",
841                                                          erm->prti);
842                                         erm->ctidAttNo = ExecFindJunkAttribute(j, resname);
843                                         if (!AttributeNumberIsValid(erm->ctidAttNo))
844                                                 elog(ERROR, "could not find junk \"%s\" column",
845                                                          resname);
846                                         /* if child relation, need tableoid too */
847                                         if (erm->rti != erm->prti)
848                                         {
849                                                 snprintf(resname, sizeof(resname), "tableoid%u",
850                                                                  erm->prti);
851                                                 erm->toidAttNo = ExecFindJunkAttribute(j, resname);
852                                                 if (!AttributeNumberIsValid(erm->toidAttNo))
853                                                         elog(ERROR, "could not find junk \"%s\" column",
854                                                                  resname);
855                                         }
856                                 }
857                         }
858                 }
859                 else
860                 {
861                         if (operation == CMD_INSERT)
862                                 ExecCheckPlanOutput(estate->es_result_relation_info->ri_RelationDesc,
863                                                                         planstate->plan->targetlist);
864
865                         estate->es_junkFilter = NULL;
866                         if (estate->es_rowMarks)
867                                 elog(ERROR, "SELECT FOR UPDATE/SHARE, but no junk columns");
868                 }
869         }
870
871         /*
872          * Initialize RETURNING projections if needed.
873          */
874         if (plannedstmt->returningLists)
875         {
876                 TupleTableSlot *slot;
877                 ExprContext *econtext;
878                 ResultRelInfo *resultRelInfo;
879
880                 /*
881                  * We set QueryDesc.tupDesc to be the RETURNING rowtype in this case.
882                  * We assume all the sublists will generate the same output tupdesc.
883                  */
884                 tupType = ExecTypeFromTL((List *) linitial(plannedstmt->returningLists),
885                                                                  false);
886
887                 /* Set up a slot for the output of the RETURNING projection(s) */
888                 slot = ExecAllocTableSlot(estate->es_tupleTable);
889                 ExecSetSlotDescriptor(slot, tupType);
890                 /* Need an econtext too */
891                 econtext = CreateExprContext(estate);
892
893                 /*
894                  * Build a projection for each result rel.      Note that any SubPlans in
895                  * the RETURNING lists get attached to the topmost plan node.
896                  */
897                 Assert(list_length(plannedstmt->returningLists) == estate->es_num_result_relations);
898                 resultRelInfo = estate->es_result_relations;
899                 foreach(l, plannedstmt->returningLists)
900                 {
901                         List       *rlist = (List *) lfirst(l);
902                         List       *rliststate;
903
904                         rliststate = (List *) ExecInitExpr((Expr *) rlist, planstate);
905                         resultRelInfo->ri_projectReturning =
906                                 ExecBuildProjectionInfo(rliststate, econtext, slot,
907                                                                          resultRelInfo->ri_RelationDesc->rd_att);
908                         resultRelInfo++;
909                 }
910         }
911
912         queryDesc->tupDesc = tupType;
913         queryDesc->planstate = planstate;
914
915         /*
916          * If doing SELECT INTO, initialize the "into" relation.  We must wait
917          * till now so we have the "clean" result tuple type to create the new
918          * table from.
919          *
920          * If EXPLAIN, skip creating the "into" relation.
921          */
922         if (estate->es_select_into && !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
923                 OpenIntoRel(queryDesc);
924 }
925
926 /*
927  * Initialize ResultRelInfo data for one result relation
928  */
929 void
930 InitResultRelInfo(ResultRelInfo *resultRelInfo,
931                                   Relation resultRelationDesc,
932                                   Index resultRelationIndex,
933                                   CmdType operation,
934                                   bool doInstrument)
935 {
936         /*
937          * Check valid relkind ... parser and/or planner should have noticed this
938          * already, but let's make sure.
939          */
940         switch (resultRelationDesc->rd_rel->relkind)
941         {
942                 case RELKIND_RELATION:
943                         /* OK */
944                         break;
945                 case RELKIND_SEQUENCE:
946                         ereport(ERROR,
947                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
948                                          errmsg("cannot change sequence \"%s\"",
949                                                         RelationGetRelationName(resultRelationDesc))));
950                         break;
951                 case RELKIND_TOASTVALUE:
952                         ereport(ERROR,
953                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
954                                          errmsg("cannot change TOAST relation \"%s\"",
955                                                         RelationGetRelationName(resultRelationDesc))));
956                         break;
957                 case RELKIND_VIEW:
958                         ereport(ERROR,
959                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
960                                          errmsg("cannot change view \"%s\"",
961                                                         RelationGetRelationName(resultRelationDesc))));
962                         break;
963                 default:
964                         ereport(ERROR,
965                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
966                                          errmsg("cannot change relation \"%s\"",
967                                                         RelationGetRelationName(resultRelationDesc))));
968                         break;
969         }
970
971         /* OK, fill in the node */
972         MemSet(resultRelInfo, 0, sizeof(ResultRelInfo));
973         resultRelInfo->type = T_ResultRelInfo;
974         resultRelInfo->ri_RangeTableIndex = resultRelationIndex;
975         resultRelInfo->ri_RelationDesc = resultRelationDesc;
976         resultRelInfo->ri_NumIndices = 0;
977         resultRelInfo->ri_IndexRelationDescs = NULL;
978         resultRelInfo->ri_IndexRelationInfo = NULL;
979         /* make a copy so as not to depend on relcache info not changing... */
980         resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc);
981         if (resultRelInfo->ri_TrigDesc)
982         {
983                 int                     n = resultRelInfo->ri_TrigDesc->numtriggers;
984
985                 resultRelInfo->ri_TrigFunctions = (FmgrInfo *)
986                         palloc0(n * sizeof(FmgrInfo));
987                 if (doInstrument)
988                         resultRelInfo->ri_TrigInstrument = InstrAlloc(n);
989                 else
990                         resultRelInfo->ri_TrigInstrument = NULL;
991         }
992         else
993         {
994                 resultRelInfo->ri_TrigFunctions = NULL;
995                 resultRelInfo->ri_TrigInstrument = NULL;
996         }
997         resultRelInfo->ri_ConstraintExprs = NULL;
998         resultRelInfo->ri_junkFilter = NULL;
999         resultRelInfo->ri_projectReturning = NULL;
1000
1001         /*
1002          * If there are indices on the result relation, open them and save
1003          * descriptors in the result relation info, so that we can add new index
1004          * entries for the tuples we add/update.  We need not do this for a
1005          * DELETE, however, since deletion doesn't affect indexes.
1006          */
1007         if (resultRelationDesc->rd_rel->relhasindex &&
1008                 operation != CMD_DELETE)
1009                 ExecOpenIndices(resultRelInfo);
1010 }
1011
1012 /*
1013  * Verify that the tuples to be produced by INSERT or UPDATE match the
1014  * target relation's rowtype
1015  *
1016  * We do this to guard against stale plans.  If plan invalidation is
1017  * functioning properly then we should never get a failure here, but better
1018  * safe than sorry.  Note that this is called after we have obtained lock
1019  * on the target rel, so the rowtype can't change underneath us.
1020  *
1021  * The plan output is represented by its targetlist, because that makes
1022  * handling the dropped-column case easier.
1023  */
1024 static void
1025 ExecCheckPlanOutput(Relation resultRel, List *targetList)
1026 {
1027         TupleDesc       resultDesc = RelationGetDescr(resultRel);
1028         int                     attno = 0;
1029         ListCell   *lc;
1030
1031         foreach(lc, targetList)
1032         {
1033                 TargetEntry *tle = (TargetEntry *) lfirst(lc);
1034                 Form_pg_attribute attr;
1035
1036                 if (tle->resjunk)
1037                         continue;                       /* ignore junk tlist items */
1038
1039                 if (attno >= resultDesc->natts)
1040                         ereport(ERROR,
1041                                         (errcode(ERRCODE_DATATYPE_MISMATCH),
1042                                          errmsg("table row type and query-specified row type do not match"),
1043                                          errdetail("Query has too many columns.")));
1044                 attr = resultDesc->attrs[attno++];
1045
1046                 if (!attr->attisdropped)
1047                 {
1048                         /* Normal case: demand type match */
1049                         if (exprType((Node *) tle->expr) != attr->atttypid)
1050                                 ereport(ERROR,
1051                                                 (errcode(ERRCODE_DATATYPE_MISMATCH),
1052                                                  errmsg("table row type and query-specified row type do not match"),
1053                                                  errdetail("Table has type %s at ordinal position %d, but query expects %s.",
1054                                                                    format_type_be(attr->atttypid),
1055                                                                    attno,
1056                                                                    format_type_be(exprType((Node *) tle->expr)))));
1057                 }
1058                 else
1059                 {
1060                         /*
1061                          * For a dropped column, we can't check atttypid (it's likely 0).
1062                          * In any case the planner has most likely inserted an INT4 null.
1063                          * What we insist on is just *some* NULL constant.
1064                          */
1065                         if (!IsA(tle->expr, Const) ||
1066                                 !((Const *) tle->expr)->constisnull)
1067                                 ereport(ERROR,
1068                                                 (errcode(ERRCODE_DATATYPE_MISMATCH),
1069                                                  errmsg("table row type and query-specified row type do not match"),
1070                                                  errdetail("Query provides a value for a dropped column at ordinal position %d.",
1071                                                                    attno)));
1072                 }
1073         }
1074         if (attno != resultDesc->natts)
1075                 ereport(ERROR,
1076                                 (errcode(ERRCODE_DATATYPE_MISMATCH),
1077                                  errmsg("table row type and query-specified row type do not match"),
1078                                  errdetail("Query has too few columns.")));
1079 }
1080
1081 /*
1082  *              ExecGetTriggerResultRel
1083  *
1084  * Get a ResultRelInfo for a trigger target relation.  Most of the time,
1085  * triggers are fired on one of the result relations of the query, and so
1086  * we can just return a member of the es_result_relations array.  (Note: in
1087  * self-join situations there might be multiple members with the same OID;
1088  * if so it doesn't matter which one we pick.)  However, it is sometimes
1089  * necessary to fire triggers on other relations; this happens mainly when an
1090  * RI update trigger queues additional triggers on other relations, which will
1091  * be processed in the context of the outer query.      For efficiency's sake,
1092  * we want to have a ResultRelInfo for those triggers too; that can avoid
1093  * repeated re-opening of the relation.  (It also provides a way for EXPLAIN
1094  * ANALYZE to report the runtimes of such triggers.)  So we make additional
1095  * ResultRelInfo's as needed, and save them in es_trig_target_relations.
1096  */
1097 ResultRelInfo *
1098 ExecGetTriggerResultRel(EState *estate, Oid relid)
1099 {
1100         ResultRelInfo *rInfo;
1101         int                     nr;
1102         ListCell   *l;
1103         Relation        rel;
1104         MemoryContext oldcontext;
1105
1106         /* First, search through the query result relations */
1107         rInfo = estate->es_result_relations;
1108         nr = estate->es_num_result_relations;
1109         while (nr > 0)
1110         {
1111                 if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
1112                         return rInfo;
1113                 rInfo++;
1114                 nr--;
1115         }
1116         /* Nope, but maybe we already made an extra ResultRelInfo for it */
1117         foreach(l, estate->es_trig_target_relations)
1118         {
1119                 rInfo = (ResultRelInfo *) lfirst(l);
1120                 if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
1121                         return rInfo;
1122         }
1123         /* Nope, so we need a new one */
1124
1125         /*
1126          * Open the target relation's relcache entry.  We assume that an
1127          * appropriate lock is still held by the backend from whenever the trigger
1128          * event got queued, so we need take no new lock here.
1129          */
1130         rel = heap_open(relid, NoLock);
1131
1132         /*
1133          * Make the new entry in the right context.  Currently, we don't need any
1134          * index information in ResultRelInfos used only for triggers, so tell
1135          * InitResultRelInfo it's a DELETE.
1136          */
1137         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
1138         rInfo = makeNode(ResultRelInfo);
1139         InitResultRelInfo(rInfo,
1140                                           rel,
1141                                           0,            /* dummy rangetable index */
1142                                           CMD_DELETE,
1143                                           estate->es_instrument);
1144         estate->es_trig_target_relations =
1145                 lappend(estate->es_trig_target_relations, rInfo);
1146         MemoryContextSwitchTo(oldcontext);
1147
1148         return rInfo;
1149 }
1150
1151 /*
1152  *              ExecContextForcesOids
1153  *
1154  * This is pretty grotty: when doing INSERT, UPDATE, or SELECT INTO,
1155  * we need to ensure that result tuples have space for an OID iff they are
1156  * going to be stored into a relation that has OIDs.  In other contexts
1157  * we are free to choose whether to leave space for OIDs in result tuples
1158  * (we generally don't want to, but we do if a physical-tlist optimization
1159  * is possible).  This routine checks the plan context and returns TRUE if the
1160  * choice is forced, FALSE if the choice is not forced.  In the TRUE case,
1161  * *hasoids is set to the required value.
1162  *
1163  * One reason this is ugly is that all plan nodes in the plan tree will emit
1164  * tuples with space for an OID, though we really only need the topmost node
1165  * to do so.  However, node types like Sort don't project new tuples but just
1166  * return their inputs, and in those cases the requirement propagates down
1167  * to the input node.  Eventually we might make this code smart enough to
1168  * recognize how far down the requirement really goes, but for now we just
1169  * make all plan nodes do the same thing if the top level forces the choice.
1170  *
1171  * We assume that estate->es_result_relation_info is already set up to
1172  * describe the target relation.  Note that in an UPDATE that spans an
1173  * inheritance tree, some of the target relations may have OIDs and some not.
1174  * We have to make the decisions on a per-relation basis as we initialize
1175  * each of the child plans of the topmost Append plan.
1176  *
1177  * SELECT INTO is even uglier, because we don't have the INTO relation's
1178  * descriptor available when this code runs; we have to look aside at a
1179  * flag set by InitPlan().
1180  */
1181 bool
1182 ExecContextForcesOids(PlanState *planstate, bool *hasoids)
1183 {
1184         if (planstate->state->es_select_into)
1185         {
1186                 *hasoids = planstate->state->es_into_oids;
1187                 return true;
1188         }
1189         else
1190         {
1191                 ResultRelInfo *ri = planstate->state->es_result_relation_info;
1192
1193                 if (ri != NULL)
1194                 {
1195                         Relation        rel = ri->ri_RelationDesc;
1196
1197                         if (rel != NULL)
1198                         {
1199                                 *hasoids = rel->rd_rel->relhasoids;
1200                                 return true;
1201                         }
1202                 }
1203         }
1204
1205         return false;
1206 }
1207
1208 /* ----------------------------------------------------------------
1209  *              ExecEndPlan
1210  *
1211  *              Cleans up the query plan -- closes files and frees up storage
1212  *
1213  * NOTE: we are no longer very worried about freeing storage per se
1214  * in this code; FreeExecutorState should be guaranteed to release all
1215  * memory that needs to be released.  What we are worried about doing
1216  * is closing relations and dropping buffer pins.  Thus, for example,
1217  * tuple tables must be cleared or dropped to ensure pins are released.
1218  * ----------------------------------------------------------------
1219  */
1220 static void
1221 ExecEndPlan(PlanState *planstate, EState *estate)
1222 {
1223         ResultRelInfo *resultRelInfo;
1224         int                     i;
1225         ListCell   *l;
1226
1227         /*
1228          * shut down any PlanQual processing we were doing
1229          */
1230         if (estate->es_evalPlanQual != NULL)
1231                 EndEvalPlanQual(estate);
1232
1233         /*
1234          * shut down the node-type-specific query processing
1235          */
1236         ExecEndNode(planstate);
1237
1238         /*
1239          * for subplans too
1240          */
1241         foreach(l, estate->es_subplanstates)
1242         {
1243                 PlanState  *subplanstate = (PlanState *) lfirst(l);
1244
1245                 ExecEndNode(subplanstate);
1246         }
1247
1248         /*
1249          * destroy the executor "tuple" table.
1250          */
1251         ExecDropTupleTable(estate->es_tupleTable, true);
1252         estate->es_tupleTable = NULL;
1253
1254         /*
1255          * close the result relation(s) if any, but hold locks until xact commit.
1256          */
1257         resultRelInfo = estate->es_result_relations;
1258         for (i = estate->es_num_result_relations; i > 0; i--)
1259         {
1260                 /* Close indices and then the relation itself */
1261                 ExecCloseIndices(resultRelInfo);
1262                 heap_close(resultRelInfo->ri_RelationDesc, NoLock);
1263                 resultRelInfo++;
1264         }
1265
1266         /*
1267          * likewise close any trigger target relations
1268          */
1269         foreach(l, estate->es_trig_target_relations)
1270         {
1271                 resultRelInfo = (ResultRelInfo *) lfirst(l);
1272                 /* Close indices and then the relation itself */
1273                 ExecCloseIndices(resultRelInfo);
1274                 heap_close(resultRelInfo->ri_RelationDesc, NoLock);
1275         }
1276
1277         /*
1278          * close any relations selected FOR UPDATE/FOR SHARE, again keeping locks
1279          */
1280         foreach(l, estate->es_rowMarks)
1281         {
1282                 ExecRowMark *erm = lfirst(l);
1283
1284                 heap_close(erm->relation, NoLock);
1285         }
1286 }
1287
1288 /* ----------------------------------------------------------------
1289  *              ExecutePlan
1290  *
1291  *              Processes the query plan until we have processed 'numberTuples' tuples,
1292  *              moving in the specified direction.
1293  *
1294  *              Runs to completion if numberTuples is 0
1295  *
1296  * Note: the ctid attribute is a 'junk' attribute that is removed before the
1297  * user can see it
1298  * ----------------------------------------------------------------
1299  */
1300 static void
1301 ExecutePlan(EState *estate,
1302                         PlanState *planstate,
1303                         CmdType operation,
1304                         long numberTuples,
1305                         ScanDirection direction,
1306                         DestReceiver *dest)
1307 {
1308         JunkFilter *junkfilter;
1309         TupleTableSlot *planSlot;
1310         TupleTableSlot *slot;
1311         ItemPointer tupleid = NULL;
1312         ItemPointerData tuple_ctid;
1313         long            current_tuple_count;
1314
1315         /*
1316          * initialize local variables
1317          */
1318         current_tuple_count = 0;
1319
1320         /*
1321          * Set the direction.
1322          */
1323         estate->es_direction = direction;
1324
1325         /*
1326          * Process BEFORE EACH STATEMENT triggers
1327          */
1328         switch (operation)
1329         {
1330                 case CMD_UPDATE:
1331                         ExecBSUpdateTriggers(estate, estate->es_result_relation_info);
1332                         break;
1333                 case CMD_DELETE:
1334                         ExecBSDeleteTriggers(estate, estate->es_result_relation_info);
1335                         break;
1336                 case CMD_INSERT:
1337                         ExecBSInsertTriggers(estate, estate->es_result_relation_info);
1338                         break;
1339                 default:
1340                         /* do nothing */
1341                         break;
1342         }
1343
1344         /*
1345          * Loop until we've processed the proper number of tuples from the plan.
1346          */
1347         for (;;)
1348         {
1349                 /* Reset the per-output-tuple exprcontext */
1350                 ResetPerTupleExprContext(estate);
1351
1352                 /*
1353                  * Execute the plan and obtain a tuple
1354                  */
1355 lnext:  ;
1356                 if (estate->es_useEvalPlan)
1357                 {
1358                         planSlot = EvalPlanQualNext(estate);
1359                         if (TupIsNull(planSlot))
1360                                 planSlot = ExecProcNode(planstate);
1361                 }
1362                 else
1363                         planSlot = ExecProcNode(planstate);
1364
1365                 /*
1366                  * if the tuple is null, then we assume there is nothing more to
1367                  * process so we just end the loop...
1368                  */
1369                 if (TupIsNull(planSlot))
1370                         break;
1371                 slot = planSlot;
1372
1373                 /*
1374                  * If we have a junk filter, then project a new tuple with the junk
1375                  * removed.
1376                  *
1377                  * Store this new "clean" tuple in the junkfilter's resultSlot.
1378                  * (Formerly, we stored it back over the "dirty" tuple, which is WRONG
1379                  * because that tuple slot has the wrong descriptor.)
1380                  *
1381                  * But first, extract all the junk information we need.
1382                  */
1383                 if ((junkfilter = estate->es_junkFilter) != NULL)
1384                 {
1385                         /*
1386                          * Process any FOR UPDATE or FOR SHARE locking requested.
1387                          */
1388                         if (estate->es_rowMarks != NIL)
1389                         {
1390                                 ListCell   *l;
1391
1392                 lmark:  ;
1393                                 foreach(l, estate->es_rowMarks)
1394                                 {
1395                                         ExecRowMark *erm = lfirst(l);
1396                                         Datum           datum;
1397                                         bool            isNull;
1398                                         HeapTupleData tuple;
1399                                         Buffer          buffer;
1400                                         ItemPointerData update_ctid;
1401                                         TransactionId update_xmax;
1402                                         TupleTableSlot *newSlot;
1403                                         LockTupleMode lockmode;
1404                                         HTSU_Result test;
1405
1406                                         /* if child rel, must check whether it produced this row */
1407                                         if (erm->rti != erm->prti)
1408                                         {
1409                                                 Oid             tableoid;
1410
1411                                                 datum = ExecGetJunkAttribute(slot,
1412                                                                                                          erm->toidAttNo,
1413                                                                                                          &isNull);
1414                                                 /* shouldn't ever get a null result... */
1415                                                 if (isNull)
1416                                                         elog(ERROR, "tableoid is NULL");
1417                                                 tableoid = DatumGetObjectId(datum);
1418
1419                                                 if (tableoid != RelationGetRelid(erm->relation))
1420                                                 {
1421                                                         /* this child is inactive right now */
1422                                                         ItemPointerSetInvalid(&(erm->curCtid));
1423                                                         continue;
1424                                                 }
1425                                         }
1426
1427                                         /* okay, fetch the tuple by ctid */
1428                                         datum = ExecGetJunkAttribute(slot,
1429                                                                                                  erm->ctidAttNo,
1430                                                                                                  &isNull);
1431                                         /* shouldn't ever get a null result... */
1432                                         if (isNull)
1433                                                 elog(ERROR, "ctid is NULL");
1434                                         tuple.t_self = *((ItemPointer) DatumGetPointer(datum));
1435
1436                                         if (erm->forUpdate)
1437                                                 lockmode = LockTupleExclusive;
1438                                         else
1439                                                 lockmode = LockTupleShared;
1440
1441                                         test = heap_lock_tuple(erm->relation, &tuple, &buffer,
1442                                                                                    &update_ctid, &update_xmax,
1443                                                                                    estate->es_output_cid,
1444                                                                                    lockmode, erm->noWait);
1445                                         ReleaseBuffer(buffer);
1446                                         switch (test)
1447                                         {
1448                                                 case HeapTupleSelfUpdated:
1449                                                         /* treat it as deleted; do not process */
1450                                                         goto lnext;
1451
1452                                                 case HeapTupleMayBeUpdated:
1453                                                         break;
1454
1455                                                 case HeapTupleUpdated:
1456                                                         if (IsXactIsoLevelSerializable)
1457                                                                 ereport(ERROR,
1458                                                                  (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1459                                                                   errmsg("could not serialize access due to concurrent update")));
1460                                                         if (!ItemPointerEquals(&update_ctid,
1461                                                                                                    &tuple.t_self))
1462                                                         {
1463                                                                 /* updated, so look at updated version */
1464                                                                 newSlot = EvalPlanQual(estate,
1465                                                                                                            erm->rti,
1466                                                                                                            &update_ctid,
1467                                                                                                            update_xmax);
1468                                                                 if (!TupIsNull(newSlot))
1469                                                                 {
1470                                                                         slot = planSlot = newSlot;
1471                                                                         estate->es_useEvalPlan = true;
1472                                                                         goto lmark;
1473                                                                 }
1474                                                         }
1475
1476                                                         /*
1477                                                          * if tuple was deleted or PlanQual failed for
1478                                                          * updated tuple - we must not return this tuple!
1479                                                          */
1480                                                         goto lnext;
1481
1482                                                 default:
1483                                                         elog(ERROR, "unrecognized heap_lock_tuple status: %u",
1484                                                                  test);
1485                                         }
1486
1487                                         /* Remember tuple TID for WHERE CURRENT OF */
1488                                         erm->curCtid = tuple.t_self;
1489                                 }
1490                         }
1491
1492                         /*
1493                          * extract the 'ctid' junk attribute.
1494                          */
1495                         if (operation == CMD_UPDATE || operation == CMD_DELETE)
1496                         {
1497                                 Datum           datum;
1498                                 bool            isNull;
1499
1500                                 datum = ExecGetJunkAttribute(slot, junkfilter->jf_junkAttNo,
1501                                                                                          &isNull);
1502                                 /* shouldn't ever get a null result... */
1503                                 if (isNull)
1504                                         elog(ERROR, "ctid is NULL");
1505
1506                                 tupleid = (ItemPointer) DatumGetPointer(datum);
1507                                 tuple_ctid = *tupleid;  /* make sure we don't free the ctid!! */
1508                                 tupleid = &tuple_ctid;
1509                         }
1510
1511                         /*
1512                          * Create a new "clean" tuple with all junk attributes removed. We
1513                          * don't need to do this for DELETE, however (there will in fact
1514                          * be no non-junk attributes in a DELETE!)
1515                          */
1516                         if (operation != CMD_DELETE)
1517                                 slot = ExecFilterJunk(junkfilter, slot);
1518                 }
1519
1520                 /*
1521                  * now that we have a tuple, do the appropriate thing with it.. either
1522                  * send it to the output destination, add it to a relation someplace,
1523                  * delete it from a relation, or modify some of its attributes.
1524                  */
1525                 switch (operation)
1526                 {
1527                         case CMD_SELECT:
1528                                 ExecSelect(slot, dest, estate);
1529                                 break;
1530
1531                         case CMD_INSERT:
1532                                 ExecInsert(slot, tupleid, planSlot, dest, estate);
1533                                 break;
1534
1535                         case CMD_DELETE:
1536                                 ExecDelete(tupleid, planSlot, dest, estate);
1537                                 break;
1538
1539                         case CMD_UPDATE:
1540                                 ExecUpdate(slot, tupleid, planSlot, dest, estate);
1541                                 break;
1542
1543                         default:
1544                                 elog(ERROR, "unrecognized operation code: %d",
1545                                          (int) operation);
1546                                 break;
1547                 }
1548
1549                 /*
1550                  * check our tuple count.. if we've processed the proper number then
1551                  * quit, else loop again and process more tuples.  Zero numberTuples
1552                  * means no limit.
1553                  */
1554                 current_tuple_count++;
1555                 if (numberTuples && numberTuples == current_tuple_count)
1556                         break;
1557         }
1558
1559         /*
1560          * Process AFTER EACH STATEMENT triggers
1561          */
1562         switch (operation)
1563         {
1564                 case CMD_UPDATE:
1565                         ExecASUpdateTriggers(estate, estate->es_result_relation_info);
1566                         break;
1567                 case CMD_DELETE:
1568                         ExecASDeleteTriggers(estate, estate->es_result_relation_info);
1569                         break;
1570                 case CMD_INSERT:
1571                         ExecASInsertTriggers(estate, estate->es_result_relation_info);
1572                         break;
1573                 default:
1574                         /* do nothing */
1575                         break;
1576         }
1577 }
1578
1579 /* ----------------------------------------------------------------
1580  *              ExecSelect
1581  *
1582  *              SELECTs are easy.. we just pass the tuple to the appropriate
1583  *              output function.
1584  * ----------------------------------------------------------------
1585  */
1586 static void
1587 ExecSelect(TupleTableSlot *slot,
1588                    DestReceiver *dest,
1589                    EState *estate)
1590 {
1591         (*dest->receiveSlot) (slot, dest);
1592         IncrRetrieved();
1593         (estate->es_processed)++;
1594 }
1595
1596 /* ----------------------------------------------------------------
1597  *              ExecInsert
1598  *
1599  *              INSERTs are trickier.. we have to insert the tuple into
1600  *              the base relation and insert appropriate tuples into the
1601  *              index relations.
1602  * ----------------------------------------------------------------
1603  */
1604 static void
1605 ExecInsert(TupleTableSlot *slot,
1606                    ItemPointer tupleid,
1607                    TupleTableSlot *planSlot,
1608                    DestReceiver *dest,
1609                    EState *estate)
1610 {
1611         HeapTuple       tuple;
1612         ResultRelInfo *resultRelInfo;
1613         Relation        resultRelationDesc;
1614         Oid                     newId;
1615
1616         /*
1617          * get the heap tuple out of the tuple table slot, making sure we have a
1618          * writable copy
1619          */
1620         tuple = ExecMaterializeSlot(slot);
1621
1622         /*
1623          * get information on the (current) result relation
1624          */
1625         resultRelInfo = estate->es_result_relation_info;
1626         resultRelationDesc = resultRelInfo->ri_RelationDesc;
1627
1628         /* BEFORE ROW INSERT Triggers */
1629         if (resultRelInfo->ri_TrigDesc &&
1630                 resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_INSERT] > 0)
1631         {
1632                 HeapTuple       newtuple;
1633
1634                 newtuple = ExecBRInsertTriggers(estate, resultRelInfo, tuple);
1635
1636                 if (newtuple == NULL)   /* "do nothing" */
1637                         return;
1638
1639                 if (newtuple != tuple)  /* modified by Trigger(s) */
1640                 {
1641                         /*
1642                          * Put the modified tuple into a slot for convenience of routines
1643                          * below.  We assume the tuple was allocated in per-tuple memory
1644                          * context, and therefore will go away by itself. The tuple table
1645                          * slot should not try to clear it.
1646                          */
1647                         TupleTableSlot *newslot = estate->es_trig_tuple_slot;
1648
1649                         if (newslot->tts_tupleDescriptor != slot->tts_tupleDescriptor)
1650                                 ExecSetSlotDescriptor(newslot, slot->tts_tupleDescriptor);
1651                         ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
1652                         slot = newslot;
1653                         tuple = newtuple;
1654                 }
1655         }
1656
1657         /*
1658          * Check the constraints of the tuple
1659          */
1660         if (resultRelationDesc->rd_att->constr)
1661                 ExecConstraints(resultRelInfo, slot, estate);
1662
1663         /*
1664          * insert the tuple
1665          *
1666          * Note: heap_insert returns the tid (location) of the new tuple in the
1667          * t_self field.
1668          */
1669         newId = heap_insert(resultRelationDesc, tuple,
1670                                                 estate->es_output_cid, 0, NULL);
1671
1672         IncrAppended();
1673         (estate->es_processed)++;
1674         estate->es_lastoid = newId;
1675         setLastTid(&(tuple->t_self));
1676
1677         /*
1678          * insert index entries for tuple
1679          */
1680         if (resultRelInfo->ri_NumIndices > 0)
1681                 ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);
1682
1683         /* AFTER ROW INSERT Triggers */
1684         ExecARInsertTriggers(estate, resultRelInfo, tuple);
1685
1686         /* Process RETURNING if present */
1687         if (resultRelInfo->ri_projectReturning)
1688                 ExecProcessReturning(resultRelInfo->ri_projectReturning,
1689                                                          slot, planSlot, dest);
1690 }
1691
1692 /* ----------------------------------------------------------------
1693  *              ExecDelete
1694  *
1695  *              DELETE is like UPDATE, except that we delete the tuple and no
1696  *              index modifications are needed
1697  * ----------------------------------------------------------------
1698  */
/*
 * ExecDelete
 *
 * Delete the tuple identified by "tupleid" from the current result
 * relation.  Fires BEFORE/AFTER ROW DELETE triggers, retries via
 * EvalPlanQual on concurrent update (in read-committed mode), and
 * evaluates any RETURNING projection on the deleted tuple.
 */
static void
ExecDelete(ItemPointer tupleid,
		   TupleTableSlot *planSlot,
		   DestReceiver *dest,
		   EState *estate)
{
	ResultRelInfo *resultRelInfo;
	Relation	resultRelationDesc;
	HTSU_Result result;
	ItemPointerData update_ctid;	/* set by heap_delete: TID of newer row
									 * version, if the tuple was updated */
	TransactionId update_xmax;		/* set by heap_delete: updater's xmax */

	/*
	 * get information on the (current) result relation
	 */
	resultRelInfo = estate->es_result_relation_info;
	resultRelationDesc = resultRelInfo->ri_RelationDesc;

	/* BEFORE ROW DELETE Triggers: a trigger may suppress the delete */
	if (resultRelInfo->ri_TrigDesc &&
		resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_DELETE] > 0)
	{
		bool		dodelete;

		dodelete = ExecBRDeleteTriggers(estate, resultRelInfo, tupleid);

		if (!dodelete)			/* "do nothing" */
			return;
	}

	/*
	 * delete the tuple
	 *
	 * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
	 * the row to be deleted is visible to that snapshot, and throw a can't-
	 * serialize error if not.  This is a special-case behavior needed for
	 * referential integrity updates in serializable transactions.
	 */
ldelete:;
	result = heap_delete(resultRelationDesc, tupleid,
						 &update_ctid, &update_xmax,
						 estate->es_output_cid,
						 estate->es_crosscheck_snapshot,
						 true /* wait for commit */ );
	switch (result)
	{
		case HeapTupleSelfUpdated:
			/* already deleted by self; nothing to do */
			return;

		case HeapTupleMayBeUpdated:
			/* normal case: we hold the tuple, proceed with the delete */
			break;

		case HeapTupleUpdated:
			/* concurrently updated or deleted by another transaction */
			if (IsXactIsoLevelSerializable)
				ereport(ERROR,
						(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
						 errmsg("could not serialize access due to concurrent update")));
			else if (!ItemPointerEquals(tupleid, &update_ctid))
			{
				TupleTableSlot *epqslot;

				/*
				 * The row was updated (not deleted): recheck the query
				 * quals against the latest version; if it still passes,
				 * retry the delete on that version.
				 */
				epqslot = EvalPlanQual(estate,
									   resultRelInfo->ri_RangeTableIndex,
									   &update_ctid,
									   update_xmax);
				if (!TupIsNull(epqslot))
				{
					*tupleid = update_ctid;
					goto ldelete;
				}
			}
			/* tuple already deleted; nothing to do */
			return;

		default:
			elog(ERROR, "unrecognized heap_delete status: %u", result);
			return;
	}

	IncrDeleted();
	(estate->es_processed)++;

	/*
	 * Note: Normally one would think that we have to delete index tuples
	 * associated with the heap tuple now...
	 *
	 * ... but in POSTGRES, we have no need to do this because VACUUM will
	 * take care of it later.  We can't delete index tuples immediately
	 * anyway, since the tuple is still visible to other transactions.
	 */

	/* AFTER ROW DELETE Triggers */
	ExecARDeleteTriggers(estate, resultRelInfo, tupleid);

	/* Process RETURNING if present */
	if (resultRelInfo->ri_projectReturning)
	{
		/*
		 * We have to put the target tuple into a slot, which means first we
		 * gotta fetch it.  We can use the trigger tuple slot.
		 * SnapshotAny is used because the tuple is already invisible to
		 * our own snapshot, having just been deleted.
		 */
		TupleTableSlot *slot = estate->es_trig_tuple_slot;
		HeapTupleData deltuple;
		Buffer		delbuffer;

		deltuple.t_self = *tupleid;
		if (!heap_fetch(resultRelationDesc, SnapshotAny,
						&deltuple, &delbuffer, false, NULL))
			elog(ERROR, "failed to fetch deleted tuple for DELETE RETURNING");

		if (slot->tts_tupleDescriptor != RelationGetDescr(resultRelationDesc))
			ExecSetSlotDescriptor(slot, RelationGetDescr(resultRelationDesc));
		ExecStoreTuple(&deltuple, slot, InvalidBuffer, false);

		ExecProcessReturning(resultRelInfo->ri_projectReturning,
							 slot, planSlot, dest);

		/* clear the slot before releasing the buffer its tuple points into */
		ExecClearTuple(slot);
		ReleaseBuffer(delbuffer);
	}
}
1821
1822 /* ----------------------------------------------------------------
1823  *              ExecUpdate
1824  *
1825  *              note: we can't run UPDATE queries with transactions
1826  *              off because UPDATEs are actually INSERTs and our
1827  *              scan will mistakenly loop forever, updating the tuple
1828  *              it just inserted..      This should be fixed but until it
1829  *              is, we don't want to get stuck in an infinite loop
1830  *              which corrupts your database..
1831  * ----------------------------------------------------------------
1832  */
/*
 * ExecUpdate
 *
 * Replace the tuple identified by "tupleid" with the new tuple in "slot".
 * Fires BEFORE/AFTER ROW UPDATE triggers, rechecks constraints after any
 * EvalPlanQual substitution, inserts new index entries (unless the update
 * was HOT), and evaluates any RETURNING projection.
 */
static void
ExecUpdate(TupleTableSlot *slot,
		   ItemPointer tupleid,
		   TupleTableSlot *planSlot,
		   DestReceiver *dest,
		   EState *estate)
{
	HeapTuple	tuple;
	ResultRelInfo *resultRelInfo;
	Relation	resultRelationDesc;
	HTSU_Result result;
	ItemPointerData update_ctid;	/* set by heap_update: TID of newer row
									 * version, if the tuple was updated */
	TransactionId update_xmax;		/* set by heap_update: updater's xmax */

	/*
	 * abort the operation if not running transactions
	 */
	if (IsBootstrapProcessingMode())
		elog(ERROR, "cannot UPDATE during bootstrap");

	/*
	 * get the heap tuple out of the tuple table slot, making sure we have a
	 * writable copy
	 */
	tuple = ExecMaterializeSlot(slot);

	/*
	 * get information on the (current) result relation
	 */
	resultRelInfo = estate->es_result_relation_info;
	resultRelationDesc = resultRelInfo->ri_RelationDesc;

	/* BEFORE ROW UPDATE Triggers: may cancel or replace the new tuple */
	if (resultRelInfo->ri_TrigDesc &&
		resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_UPDATE] > 0)
	{
		HeapTuple	newtuple;

		newtuple = ExecBRUpdateTriggers(estate, resultRelInfo,
										tupleid, tuple);

		if (newtuple == NULL)	/* "do nothing" */
			return;

		if (newtuple != tuple)	/* modified by Trigger(s) */
		{
			/*
			 * Put the modified tuple into a slot for convenience of routines
			 * below.  We assume the tuple was allocated in per-tuple memory
			 * context, and therefore will go away by itself. The tuple table
			 * slot should not try to clear it.
			 */
			TupleTableSlot *newslot = estate->es_trig_tuple_slot;

			if (newslot->tts_tupleDescriptor != slot->tts_tupleDescriptor)
				ExecSetSlotDescriptor(newslot, slot->tts_tupleDescriptor);
			ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
			slot = newslot;
			tuple = newtuple;
		}
	}

	/*
	 * Check the constraints of the tuple
	 *
	 * If we generate a new candidate tuple after EvalPlanQual testing, we
	 * must loop back here and recheck constraints.  (We don't need to redo
	 * triggers, however.  If there are any BEFORE triggers then trigger.c
	 * will have done heap_lock_tuple to lock the correct tuple, so there's no
	 * need to do them again.)
	 */
lreplace:;
	if (resultRelationDesc->rd_att->constr)
		ExecConstraints(resultRelInfo, slot, estate);

	/*
	 * replace the heap tuple
	 *
	 * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
	 * the row to be updated is visible to that snapshot, and throw a can't-
	 * serialize error if not.  This is a special-case behavior needed for
	 * referential integrity updates in serializable transactions.
	 */
	result = heap_update(resultRelationDesc, tupleid, tuple,
						 &update_ctid, &update_xmax,
						 estate->es_output_cid,
						 estate->es_crosscheck_snapshot,
						 true /* wait for commit */ );
	switch (result)
	{
		case HeapTupleSelfUpdated:
			/* already deleted by self; nothing to do */
			return;

		case HeapTupleMayBeUpdated:
			/* normal case: update succeeded */
			break;

		case HeapTupleUpdated:
			/* concurrently updated or deleted by another transaction */
			if (IsXactIsoLevelSerializable)
				ereport(ERROR,
						(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
						 errmsg("could not serialize access due to concurrent update")));
			else if (!ItemPointerEquals(tupleid, &update_ctid))
			{
				TupleTableSlot *epqslot;

				/*
				 * The row was updated: recheck quals against the newest
				 * version; if it still passes, re-derive the new tuple
				 * from the EvalPlanQual output and retry the update.
				 */
				epqslot = EvalPlanQual(estate,
									   resultRelInfo->ri_RangeTableIndex,
									   &update_ctid,
									   update_xmax);
				if (!TupIsNull(epqslot))
				{
					*tupleid = update_ctid;
					slot = ExecFilterJunk(estate->es_junkFilter, epqslot);
					tuple = ExecMaterializeSlot(slot);
					goto lreplace;
				}
			}
			/* tuple already deleted; nothing to do */
			return;

		default:
			elog(ERROR, "unrecognized heap_update status: %u", result);
			return;
	}

	IncrReplaced();
	(estate->es_processed)++;

	/*
	 * Note: instead of having to update the old index tuples associated with
	 * the heap tuple, all we do is form and insert new index tuples. This is
	 * because UPDATEs are actually DELETEs and INSERTs, and index tuple
	 * deletion is done later by VACUUM (see notes in ExecDelete).  All we do
	 * here is insert new index tuples.  -cim 9/27/89
	 */

	/*
	 * insert index entries for tuple
	 *
	 * Note: heap_update returns the tid (location) of the new tuple in the
	 * t_self field.
	 *
	 * If it's a HOT update, we mustn't insert new index entries.
	 */
	if (resultRelInfo->ri_NumIndices > 0 && !HeapTupleIsHeapOnly(tuple))
		ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);

	/* AFTER ROW UPDATE Triggers */
	ExecARUpdateTriggers(estate, resultRelInfo, tupleid, tuple);

	/* Process RETURNING if present */
	if (resultRelInfo->ri_projectReturning)
		ExecProcessReturning(resultRelInfo->ri_projectReturning,
							 slot, planSlot, dest);
}
1989
1990 /*
1991  * ExecRelCheck --- check that tuple meets constraints for result relation
1992  */
1993 static const char *
1994 ExecRelCheck(ResultRelInfo *resultRelInfo,
1995                          TupleTableSlot *slot, EState *estate)
1996 {
1997         Relation        rel = resultRelInfo->ri_RelationDesc;
1998         int                     ncheck = rel->rd_att->constr->num_check;
1999         ConstrCheck *check = rel->rd_att->constr->check;
2000         ExprContext *econtext;
2001         MemoryContext oldContext;
2002         List       *qual;
2003         int                     i;
2004
2005         /*
2006          * If first time through for this result relation, build expression
2007          * nodetrees for rel's constraint expressions.  Keep them in the per-query
2008          * memory context so they'll survive throughout the query.
2009          */
2010         if (resultRelInfo->ri_ConstraintExprs == NULL)
2011         {
2012                 oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
2013                 resultRelInfo->ri_ConstraintExprs =
2014                         (List **) palloc(ncheck * sizeof(List *));
2015                 for (i = 0; i < ncheck; i++)
2016                 {
2017                         /* ExecQual wants implicit-AND form */
2018                         qual = make_ands_implicit(stringToNode(check[i].ccbin));
2019                         resultRelInfo->ri_ConstraintExprs[i] = (List *)
2020                                 ExecPrepareExpr((Expr *) qual, estate);
2021                 }
2022                 MemoryContextSwitchTo(oldContext);
2023         }
2024
2025         /*
2026          * We will use the EState's per-tuple context for evaluating constraint
2027          * expressions (creating it if it's not already there).
2028          */
2029         econtext = GetPerTupleExprContext(estate);
2030
2031         /* Arrange for econtext's scan tuple to be the tuple under test */
2032         econtext->ecxt_scantuple = slot;
2033
2034         /* And evaluate the constraints */
2035         for (i = 0; i < ncheck; i++)
2036         {
2037                 qual = resultRelInfo->ri_ConstraintExprs[i];
2038
2039                 /*
2040                  * NOTE: SQL92 specifies that a NULL result from a constraint
2041                  * expression is not to be treated as a failure.  Therefore, tell
2042                  * ExecQual to return TRUE for NULL.
2043                  */
2044                 if (!ExecQual(qual, econtext, true))
2045                         return check[i].ccname;
2046         }
2047
2048         /* NULL result means no error */
2049         return NULL;
2050 }
2051
2052 void
2053 ExecConstraints(ResultRelInfo *resultRelInfo,
2054                                 TupleTableSlot *slot, EState *estate)
2055 {
2056         Relation        rel = resultRelInfo->ri_RelationDesc;
2057         TupleConstr *constr = rel->rd_att->constr;
2058
2059         Assert(constr);
2060
2061         if (constr->has_not_null)
2062         {
2063                 int                     natts = rel->rd_att->natts;
2064                 int                     attrChk;
2065
2066                 for (attrChk = 1; attrChk <= natts; attrChk++)
2067                 {
2068                         if (rel->rd_att->attrs[attrChk - 1]->attnotnull &&
2069                                 slot_attisnull(slot, attrChk))
2070                                 ereport(ERROR,
2071                                                 (errcode(ERRCODE_NOT_NULL_VIOLATION),
2072                                                  errmsg("null value in column \"%s\" violates not-null constraint",
2073                                                 NameStr(rel->rd_att->attrs[attrChk - 1]->attname))));
2074                 }
2075         }
2076
2077         if (constr->num_check > 0)
2078         {
2079                 const char *failed;
2080
2081                 if ((failed = ExecRelCheck(resultRelInfo, slot, estate)) != NULL)
2082                         ereport(ERROR,
2083                                         (errcode(ERRCODE_CHECK_VIOLATION),
2084                                          errmsg("new row for relation \"%s\" violates check constraint \"%s\"",
2085                                                         RelationGetRelationName(rel), failed)));
2086         }
2087 }
2088
2089 /*
2090  * ExecProcessReturning --- evaluate a RETURNING list and send to dest
2091  *
2092  * projectReturning: RETURNING projection info for current result rel
2093  * tupleSlot: slot holding tuple actually inserted/updated/deleted
2094  * planSlot: slot holding tuple returned by top plan node
2095  * dest: where to send the output
2096  */
2097 static void
2098 ExecProcessReturning(ProjectionInfo *projectReturning,
2099                                          TupleTableSlot *tupleSlot,
2100                                          TupleTableSlot *planSlot,
2101                                          DestReceiver *dest)
2102 {
2103         ExprContext *econtext = projectReturning->pi_exprContext;
2104         TupleTableSlot *retSlot;
2105
2106         /*
2107          * Reset per-tuple memory context to free any expression evaluation
2108          * storage allocated in the previous cycle.
2109          */
2110         ResetExprContext(econtext);
2111
2112         /* Make tuple and any needed join variables available to ExecProject */
2113         econtext->ecxt_scantuple = tupleSlot;
2114         econtext->ecxt_outertuple = planSlot;
2115
2116         /* Compute the RETURNING expressions */
2117         retSlot = ExecProject(projectReturning, NULL);
2118
2119         /* Send to dest */
2120         (*dest->receiveSlot) (retSlot, dest);
2121
2122         ExecClearTuple(retSlot);
2123 }
2124
/*
 * Check a modified tuple to see if we want to process its updated version
 * under READ COMMITTED rules.
 *
 * See backend/executor/README for some info about how this works.
 *
 *	estate - executor state data
 *	rti - rangetable index of table containing tuple
 *	*tid - t_ctid from the outdated tuple (ie, next updated version)
 *	priorXmax - t_xmax from the outdated tuple
 *
 * *tid is also an output parameter: it's modified to hold the TID of the
 * latest version of the tuple (note this may be changed even on failure)
 *
 * Returns a slot containing the new candidate update/delete tuple, or
 * NULL if we determine we shouldn't process the row.
 */
TupleTableSlot *
EvalPlanQual(EState *estate, Index rti,
			 ItemPointer tid, TransactionId priorXmax)
{
	evalPlanQual *epq;
	EState	   *epqstate;
	Relation	relation;
	HeapTupleData tuple;
	HeapTuple	copyTuple = NULL;
	SnapshotData SnapshotDirty;
	bool		endNode;

	Assert(rti != 0);

	/*
	 * find relation containing target tuple
	 *
	 * It is either the query's result relation, or (for FOR UPDATE/SHARE)
	 * one of the relations listed in es_rowMarks.
	 */
	if (estate->es_result_relation_info != NULL &&
		estate->es_result_relation_info->ri_RangeTableIndex == rti)
		relation = estate->es_result_relation_info->ri_RelationDesc;
	else
	{
		ListCell   *l;

		relation = NULL;
		foreach(l, estate->es_rowMarks)
		{
			ExecRowMark *erm = lfirst(l);

			if (erm->rti == rti)
			{
				relation = erm->relation;
				break;
			}
		}
		if (relation == NULL)
			elog(ERROR, "could not find RowMark for RT index %u", rti);
	}

	/*
	 * fetch tid tuple
	 *
	 * Loop here to deal with updated or busy tuples
	 */
	InitDirtySnapshot(SnapshotDirty);
	tuple.t_self = *tid;
	for (;;)
	{
		Buffer		buffer;

		if (heap_fetch(relation, &SnapshotDirty, &tuple, &buffer, true, NULL))
		{
			/*
			 * If xmin isn't what we're expecting, the slot must have been
			 * recycled and reused for an unrelated tuple.  This implies that
			 * the latest version of the row was deleted, so we need do
			 * nothing.  (Should be safe to examine xmin without getting
			 * buffer's content lock, since xmin never changes in an existing
			 * tuple.)
			 */
			if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
									 priorXmax))
			{
				ReleaseBuffer(buffer);
				return NULL;
			}

			/* otherwise xmin should not be dirty... */
			if (TransactionIdIsValid(SnapshotDirty.xmin))
				elog(ERROR, "t_xmin is uncommitted in tuple to be updated");

			/*
			 * If tuple is being updated by other transaction then we have to
			 * wait for its commit/abort.
			 */
			if (TransactionIdIsValid(SnapshotDirty.xmax))
			{
				ReleaseBuffer(buffer);
				XactLockTableWait(SnapshotDirty.xmax);
				continue;		/* loop back to repeat heap_fetch */
			}

			/*
			 * If tuple was inserted by our own transaction, we have to check
			 * cmin against es_output_cid: cmin >= current CID means our
			 * command cannot see the tuple, so we should ignore it.  Without
			 * this we are open to the "Halloween problem" of indefinitely
			 * re-updating the same tuple. (We need not check cmax because
			 * HeapTupleSatisfiesDirty will consider a tuple deleted by our
			 * transaction dead, regardless of cmax.)  We just checked that
			 * priorXmax == xmin, so we can test that variable instead of
			 * doing HeapTupleHeaderGetXmin again.
			 */
			if (TransactionIdIsCurrentTransactionId(priorXmax) &&
				HeapTupleHeaderGetCmin(tuple.t_data) >= estate->es_output_cid)
			{
				ReleaseBuffer(buffer);
				return NULL;
			}

			/*
			 * We got tuple - now copy it for use by recheck query.
			 */
			copyTuple = heap_copytuple(&tuple);
			ReleaseBuffer(buffer);
			break;
		}

		/*
		 * If the referenced slot was actually empty, the latest version of
		 * the row must have been deleted, so we need do nothing.
		 */
		if (tuple.t_data == NULL)
		{
			ReleaseBuffer(buffer);
			return NULL;
		}

		/*
		 * As above, if xmin isn't what we're expecting, do nothing.
		 */
		if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
								 priorXmax))
		{
			ReleaseBuffer(buffer);
			return NULL;
		}

		/*
		 * If we get here, the tuple was found but failed SnapshotDirty.
		 * Assuming the xmin is either a committed xact or our own xact (as it
		 * certainly should be if we're trying to modify the tuple), this must
		 * mean that the row was updated or deleted by either a committed xact
		 * or our own xact.  If it was deleted, we can ignore it; if it was
		 * updated then chain up to the next version and repeat the whole
		 * test.
		 *
		 * As above, it should be safe to examine xmax and t_ctid without the
		 * buffer content lock, because they can't be changing.
		 */
		if (ItemPointerEquals(&tuple.t_self, &tuple.t_data->t_ctid))
		{
			/* deleted, so forget about it */
			ReleaseBuffer(buffer);
			return NULL;
		}

		/* updated, so look at the updated row */
		tuple.t_self = tuple.t_data->t_ctid;
		/* updated row should have xmin matching this xmax */
		priorXmax = HeapTupleHeaderGetXmax(tuple.t_data);
		ReleaseBuffer(buffer);
		/* loop back to fetch next in chain */
	}

	/*
	 * For UPDATE/DELETE we have to return tid of actual row we're executing
	 * PQ for.
	 */
	*tid = tuple.t_self;

	/*
	 * Need to run a recheck subquery.      Find or create a PQ stack entry.
	 */
	epq = estate->es_evalPlanQual;
	endNode = true;

	if (epq != NULL && epq->rti == 0)
	{
		/* Top PQ stack entry is idle, so re-use it */
		Assert(!(estate->es_useEvalPlan) && epq->next == NULL);
		epq->rti = rti;
		endNode = false;
	}

	/*
	 * If this is request for another RTE - Ra, - then we have to check wasn't
	 * PlanQual requested for Ra already and if so then Ra' row was updated
	 * again and we have to re-start old execution for Ra and forget all what
	 * we done after Ra was suspended. Cool? -:))
	 */
	if (epq != NULL && epq->rti != rti &&
		epq->estate->es_evTuple[rti - 1] != NULL)
	{
		do
		{
			evalPlanQual *oldepq;

			/* stop execution */
			EvalPlanQualStop(epq);
			/* pop previous PlanQual from the stack */
			oldepq = epq->next;
			Assert(oldepq && oldepq->rti != 0);
			/* push current PQ to freePQ stack */
			oldepq->free = epq;
			epq = oldepq;
			estate->es_evalPlanQual = epq;
		} while (epq->rti != rti);
	}

	/*
	 * If we are requested for another RTE then we have to suspend execution
	 * of current PlanQual and start execution for new one.
	 */
	if (epq == NULL || epq->rti != rti)
	{
		/* try to reuse plan used previously */
		evalPlanQual *newepq = (epq != NULL) ? epq->free : NULL;

		if (newepq == NULL)		/* first call or freePQ stack is empty */
		{
			newepq = (evalPlanQual *) palloc0(sizeof(evalPlanQual));
			newepq->free = NULL;
			newepq->estate = NULL;
			newepq->planstate = NULL;
		}
		else
		{
			/* recycle previously used PlanQual */
			Assert(newepq->estate == NULL);
			epq->free = NULL;
		}
		/* push current PQ to the stack */
		newepq->next = epq;
		epq = newepq;
		estate->es_evalPlanQual = epq;
		epq->rti = rti;
		endNode = false;
	}

	Assert(epq->rti == rti);

	/*
	 * Ok - we're requested for the same RTE.  Unfortunately we still have to
	 * end and restart execution of the plan, because ExecReScan wouldn't
	 * ensure that upper plan nodes would reset themselves.  We could make
	 * that work if insertion of the target tuple were integrated with the
	 * Param mechanism somehow, so that the upper plan nodes know that their
	 * children's outputs have changed.
	 *
	 * Note that the stack of free evalPlanQual nodes is quite useless at the
	 * moment, since it only saves us from pallocing/releasing the
	 * evalPlanQual nodes themselves.  But it will be useful once we implement
	 * ReScan instead of end/restart for re-using PlanQual nodes.
	 */
	if (endNode)
	{
		/* stop execution */
		EvalPlanQualStop(epq);
	}

	/*
	 * Initialize new recheck query.
	 *
	 * Note: if we were re-using PlanQual plans via ExecReScan, we'd need to
	 * instead copy down changeable state from the top plan (including
	 * es_result_relation_info, es_junkFilter) and reset locally changeable
	 * state in the epq (including es_param_exec_vals, es_evTupleNull).
	 */
	EvalPlanQualStart(epq, estate, epq->next);

	/*
	 * free old RTE' tuple, if any, and store target tuple where relation's
	 * scan node will see it
	 */
	epqstate = epq->estate;
	if (epqstate->es_evTuple[rti - 1] != NULL)
		heap_freetuple(epqstate->es_evTuple[rti - 1]);
	epqstate->es_evTuple[rti - 1] = copyTuple;

	/* Run the recheck plan and hand back its first candidate tuple */
	return EvalPlanQualNext(estate);
}
2414
2415 static TupleTableSlot *
2416 EvalPlanQualNext(EState *estate)
2417 {
2418         evalPlanQual *epq = estate->es_evalPlanQual;
2419         MemoryContext oldcontext;
2420         TupleTableSlot *slot;
2421
2422         Assert(epq->rti != 0);
2423
2424 lpqnext:;
2425         oldcontext = MemoryContextSwitchTo(epq->estate->es_query_cxt);
2426         slot = ExecProcNode(epq->planstate);
2427         MemoryContextSwitchTo(oldcontext);
2428
2429         /*
2430          * No more tuples for this PQ. Continue previous one.
2431          */
2432         if (TupIsNull(slot))
2433         {
2434                 evalPlanQual *oldepq;
2435
2436                 /* stop execution */
2437                 EvalPlanQualStop(epq);
2438                 /* pop old PQ from the stack */
2439                 oldepq = epq->next;
2440                 if (oldepq == NULL)
2441                 {
2442                         /* this is the first (oldest) PQ - mark as free */
2443                         epq->rti = 0;
2444                         estate->es_useEvalPlan = false;
2445                         /* and continue Query execution */
2446                         return NULL;
2447                 }
2448                 Assert(oldepq->rti != 0);
2449                 /* push current PQ to freePQ stack */
2450                 oldepq->free = epq;
2451                 epq = oldepq;
2452                 estate->es_evalPlanQual = epq;
2453                 goto lpqnext;
2454         }
2455
2456         return slot;
2457 }
2458
2459 static void
2460 EndEvalPlanQual(EState *estate)
2461 {
2462         evalPlanQual *epq = estate->es_evalPlanQual;
2463
2464         if (epq->rti == 0)                      /* plans already shutdowned */
2465         {
2466                 Assert(epq->next == NULL);
2467                 return;
2468         }
2469
2470         for (;;)
2471         {
2472                 evalPlanQual *oldepq;
2473
2474                 /* stop execution */
2475                 EvalPlanQualStop(epq);
2476                 /* pop old PQ from the stack */
2477                 oldepq = epq->next;
2478                 if (oldepq == NULL)
2479                 {
2480                         /* this is the first (oldest) PQ - mark as free */
2481                         epq->rti = 0;
2482                         estate->es_useEvalPlan = false;
2483                         break;
2484                 }
2485                 Assert(oldepq->rti != 0);
2486                 /* push current PQ to freePQ stack */
2487                 oldepq->free = epq;
2488                 epq = oldepq;
2489                 estate->es_evalPlanQual = epq;
2490         }
2491 }
2492
/*
 * Start execution of one level of PlanQual.
 *
 * This is a cut-down version of ExecutorStart(): we copy some state from
 * the top-level estate rather than initializing it fresh.
 *
 * epq: the PQ stack entry to start; its estate/planstate fields are filled in
 * estate: the top-level executor state to borrow shared state from
 * priorepq: next-lower PQ stack entry, or NULL if epq is the bottom entry
 */
static void
EvalPlanQualStart(evalPlanQual *epq, EState *estate, evalPlanQual *priorepq)
{
	EState	   *epqstate;
	int			rtsize;
	MemoryContext oldcontext;
	ListCell   *l;

	rtsize = list_length(estate->es_range_table);

	epq->estate = epqstate = CreateExecutorState();

	/* All allocations below live in the recheck query's own context */
	oldcontext = MemoryContextSwitchTo(epqstate->es_query_cxt);

	/*
	 * The epqstates share the top query's copy of unchanging state such as
	 * the snapshot, rangetable, result-rel info, and external Param info.
	 * They need their own copies of local state, including a tuple table,
	 * es_param_exec_vals, etc.
	 */
	epqstate->es_direction = ForwardScanDirection;
	epqstate->es_snapshot = estate->es_snapshot;
	epqstate->es_crosscheck_snapshot = estate->es_crosscheck_snapshot;
	epqstate->es_range_table = estate->es_range_table;
	epqstate->es_output_cid = estate->es_output_cid;
	epqstate->es_result_relations = estate->es_result_relations;
	epqstate->es_num_result_relations = estate->es_num_result_relations;
	epqstate->es_result_relation_info = estate->es_result_relation_info;
	epqstate->es_junkFilter = estate->es_junkFilter;
	/* es_trig_target_relations must NOT be copied */
	epqstate->es_param_list_info = estate->es_param_list_info;
	/* exec Params get a private, zeroed array (local mutable state) */
	if (estate->es_plannedstmt->nParamExec > 0)
		epqstate->es_param_exec_vals = (ParamExecData *)
			palloc0(estate->es_plannedstmt->nParamExec * sizeof(ParamExecData));
	epqstate->es_rowMarks = estate->es_rowMarks;
	epqstate->es_instrument = estate->es_instrument;
	epqstate->es_select_into = estate->es_select_into;
	epqstate->es_into_oids = estate->es_into_oids;
	epqstate->es_plannedstmt = estate->es_plannedstmt;

	/*
	 * Each epqstate must have its own es_evTupleNull state, but all the stack
	 * entries share es_evTuple state.      This allows sub-rechecks to inherit
	 * the value being examined by an outer recheck.
	 */
	epqstate->es_evTupleNull = (bool *) palloc0(rtsize * sizeof(bool));
	if (priorepq == NULL)
		/* first PQ stack entry */
		epqstate->es_evTuple = (HeapTuple *)
			palloc0(rtsize * sizeof(HeapTuple));
	else
		/* later stack entries share the same storage */
		epqstate->es_evTuple = priorepq->estate->es_evTuple;

	/*
	 * Create sub-tuple-table; we needn't redo the CountSlots work though.
	 */
	epqstate->es_tupleTable =
		ExecCreateTupleTable(estate->es_tupleTable->size);

	/*
	 * Initialize private state information for each SubPlan.  We must do this
	 * before running ExecInitNode on the main query tree, since
	 * ExecInitSubPlan expects to be able to find these entries.
	 */
	Assert(epqstate->es_subplanstates == NIL);
	foreach(l, estate->es_plannedstmt->subplans)
	{
		Plan	   *subplan = (Plan *) lfirst(l);
		PlanState  *subplanstate;

		subplanstate = ExecInitNode(subplan, epqstate, 0);

		epqstate->es_subplanstates = lappend(epqstate->es_subplanstates,
											 subplanstate);
	}

	/*
	 * Initialize the private state information for all the nodes in the query
	 * tree.  This opens files, allocates storage and leaves us ready to start
	 * processing tuples.
	 */
	epq->planstate = ExecInitNode(estate->es_plannedstmt->planTree, epqstate, 0);

	MemoryContextSwitchTo(oldcontext);
}
2585
/*
 * End execution of one level of PlanQual.
 *
 * This is a cut-down version of ExecutorEnd(); basically we want to do most
 * of the normal cleanup, but *not* close result relations (which we are
 * just sharing from the outer query).  We do, however, have to close any
 * trigger target relations that got opened, since those are not shared.
 */
static void
EvalPlanQualStop(evalPlanQual *epq)
{
	EState	   *epqstate = epq->estate;
	MemoryContext oldcontext;
	ListCell   *l;

	/* Do the shutdown work in the recheck query's own context */
	oldcontext = MemoryContextSwitchTo(epqstate->es_query_cxt);

	/* Shut down the main plan tree first, then each subplan */
	ExecEndNode(epq->planstate);

	foreach(l, epqstate->es_subplanstates)
	{
		PlanState  *subplanstate = (PlanState *) lfirst(l);

		ExecEndNode(subplanstate);
	}

	/* Drop the private tuple table (second arg: also free the tuples) */
	ExecDropTupleTable(epqstate->es_tupleTable, true);
	epqstate->es_tupleTable = NULL;

	/* Release this level's target tuple, if it is still stored */
	if (epqstate->es_evTuple[epq->rti - 1] != NULL)
	{
		heap_freetuple(epqstate->es_evTuple[epq->rti - 1]);
		epqstate->es_evTuple[epq->rti - 1] = NULL;
	}

	foreach(l, epqstate->es_trig_target_relations)
	{
		ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l);

		/* Close indices and then the relation itself */
		ExecCloseIndices(resultRelInfo);
		heap_close(resultRelInfo->ri_RelationDesc, NoLock);
	}

	MemoryContextSwitchTo(oldcontext);

	/* This also destroys es_query_cxt and everything allocated in it */
	FreeExecutorState(epqstate);

	/* Mark this PQ entry as having no live execution state */
	epq->estate = NULL;
	epq->planstate = NULL;
}
2637
2638 /*
2639  * ExecGetActivePlanTree --- get the active PlanState tree from a QueryDesc
2640  *
2641  * Ordinarily this is just the one mentioned in the QueryDesc, but if we
2642  * are looking at a row returned by the EvalPlanQual machinery, we need
2643  * to look at the subsidiary state instead.
2644  */
2645 PlanState *
2646 ExecGetActivePlanTree(QueryDesc *queryDesc)
2647 {
2648         EState     *estate = queryDesc->estate;
2649
2650         if (estate && estate->es_useEvalPlan && estate->es_evalPlanQual != NULL)
2651                 return estate->es_evalPlanQual->planstate;
2652         else
2653                 return queryDesc->planstate;
2654 }
2655
2656
2657 /*
2658  * Support for SELECT INTO (a/k/a CREATE TABLE AS)
2659  *
2660  * We implement SELECT INTO by diverting SELECT's normal output with
2661  * a specialized DestReceiver type.
2662  */
2663
/*
 * Private DestReceiver subtype for SELECT INTO: routes the query's output
 * tuples into the newly created target relation.  Filled in by OpenIntoRel,
 * torn down by CloseIntoRel.
 */
typedef struct
{
	DestReceiver pub;			/* publicly-known function pointers */
	EState	   *estate;			/* EState we are working with */
	Relation	rel;			/* Relation to write to */
	int			hi_options;		/* heap_insert performance options */
	BulkInsertState bistate;	/* bulk insert state */
} DR_intorel;
2672
/*
 * OpenIntoRel --- actually create the SELECT INTO target relation
 *
 * This also replaces QueryDesc->dest with the special DestReceiver for
 * SELECT INTO.  We assume that the correct result tuple type has already
 * been placed in queryDesc->tupDesc.
 */
static void
OpenIntoRel(QueryDesc *queryDesc)
{
	IntoClause *into = queryDesc->plannedstmt->intoClause;
	EState	   *estate = queryDesc->estate;
	Relation	intoRelationDesc;
	char	   *intoName;
	Oid			namespaceId;
	Oid			tablespaceId;
	Datum		reloptions;
	AclResult	aclresult;
	Oid			intoRelationId;
	TupleDesc	tupdesc;
	DR_intorel *myState;

	Assert(into);

	/*
	 * Check consistency of arguments
	 */
	if (into->onCommit != ONCOMMIT_NOOP && !into->rel->istemp)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
				 errmsg("ON COMMIT can only be used on temporary tables")));

	/*
	 * Find namespace to create in, check its permissions
	 */
	intoName = into->rel->relname;
	namespaceId = RangeVarGetCreationNamespace(into->rel);

	aclresult = pg_namespace_aclcheck(namespaceId, GetUserId(),
									  ACL_CREATE);
	if (aclresult != ACLCHECK_OK)
		aclcheck_error(aclresult, ACL_KIND_NAMESPACE,
					   get_namespace_name(namespaceId));

	/*
	 * Select tablespace to use.  If not specified, use default tablespace
	 * (which may in turn default to database's default).
	 */
	if (into->tableSpaceName)
	{
		tablespaceId = get_tablespace_oid(into->tableSpaceName);
		if (!OidIsValid(tablespaceId))
			ereport(ERROR,
					(errcode(ERRCODE_UNDEFINED_OBJECT),
					 errmsg("tablespace \"%s\" does not exist",
							into->tableSpaceName)));
	}
	else
	{
		tablespaceId = GetDefaultTablespace(into->rel->istemp);
		/* note InvalidOid is OK in this case */
	}

	/* Check permissions except when using the database's default space */
	if (OidIsValid(tablespaceId) && tablespaceId != MyDatabaseTableSpace)
	{
		AclResult	aclresult;

		aclresult = pg_tablespace_aclcheck(tablespaceId, GetUserId(),
										   ACL_CREATE);

		if (aclresult != ACLCHECK_OK)
			aclcheck_error(aclresult, ACL_KIND_TABLESPACE,
						   get_tablespace_name(tablespaceId));
	}

	/* Parse and validate any reloptions */
	reloptions = transformRelOptions((Datum) 0,
									 into->options,
									 true,
									 false);
	(void) heap_reloptions(RELKIND_RELATION, reloptions, true);

	/* Copy the tupdesc because heap_create_with_catalog modifies it */
	tupdesc = CreateTupleDescCopy(queryDesc->tupDesc);

	/* Now we can actually create the new relation */
	intoRelationId = heap_create_with_catalog(intoName,
											  namespaceId,
											  tablespaceId,
											  InvalidOid,
											  GetUserId(),
											  tupdesc,
											  NIL,
											  RELKIND_RELATION,
											  false,
											  true,
											  0,
											  into->onCommit,
											  reloptions,
											  allowSystemTableMods);

	FreeTupleDesc(tupdesc);

	/*
	 * Advance command counter so that the newly-created relation's catalog
	 * tuples will be visible to heap_open.
	 */
	CommandCounterIncrement();

	/*
	 * If necessary, create a TOAST table for the INTO relation. Note that
	 * AlterTableCreateToastTable ends with CommandCounterIncrement(), so that
	 * the TOAST table will be visible for insertion.
	 */
	AlterTableCreateToastTable(intoRelationId);

	/*
	 * And open the constructed table for writing.
	 */
	intoRelationDesc = heap_open(intoRelationId, AccessExclusiveLock);

	/*
	 * Now replace the query's DestReceiver with one for SELECT INTO
	 */
	queryDesc->dest = CreateDestReceiver(DestIntoRel, NULL);
	myState = (DR_intorel *) queryDesc->dest;
	Assert(myState->pub.mydest == DestIntoRel);
	myState->estate = estate;
	myState->rel = intoRelationDesc;

	/*
	 * We can skip WAL-logging the insertions, unless PITR is in use.  We
	 * can skip the FSM in any case.
	 */
	myState->hi_options = HEAP_INSERT_SKIP_FSM |
		(XLogArchivingActive() ? 0 : HEAP_INSERT_SKIP_WAL);
	myState->bistate = GetBulkInsertState();

	/* Not using WAL requires rd_targblock be initially invalid */
	Assert(intoRelationDesc->rd_targblock == InvalidBlockNumber);
}
2815
2816 /*
2817  * CloseIntoRel --- clean up SELECT INTO at ExecutorEnd time
2818  */
2819 static void
2820 CloseIntoRel(QueryDesc *queryDesc)
2821 {
2822         DR_intorel *myState = (DR_intorel *) queryDesc->dest;
2823
2824         /* OpenIntoRel might never have gotten called */
2825         if (myState && myState->pub.mydest == DestIntoRel && myState->rel)
2826         {
2827                 FreeBulkInsertState(myState->bistate);
2828
2829                 /* If we skipped using WAL, must heap_sync before commit */
2830                 if (myState->hi_options & HEAP_INSERT_SKIP_WAL)
2831                         heap_sync(myState->rel);
2832
2833                 /* close rel, but keep lock until commit */
2834                 heap_close(myState->rel, NoLock);
2835
2836                 myState->rel = NULL;
2837         }
2838 }
2839
2840 /*
2841  * CreateIntoRelDestReceiver -- create a suitable DestReceiver object
2842  *
2843  * Since CreateDestReceiver doesn't accept the parameters we'd need,
2844  * we just leave the private fields zeroed here.  OpenIntoRel will
2845  * fill them in.
2846  */
2847 DestReceiver *
2848 CreateIntoRelDestReceiver(void)
2849 {
2850         DR_intorel *self = (DR_intorel *) palloc0(sizeof(DR_intorel));
2851
2852         self->pub.receiveSlot = intorel_receive;
2853         self->pub.rStartup = intorel_startup;
2854         self->pub.rShutdown = intorel_shutdown;
2855         self->pub.rDestroy = intorel_destroy;
2856         self->pub.mydest = DestIntoRel;
2857
2858         return (DestReceiver *) self;
2859 }
2860
2861 /*
2862  * intorel_startup --- executor startup
2863  */
2864 static void
2865 intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
2866 {
2867         /* no-op */
2868 }
2869
2870 /*
2871  * intorel_receive --- receive one tuple
2872  */
2873 static void
2874 intorel_receive(TupleTableSlot *slot, DestReceiver *self)
2875 {
2876         DR_intorel *myState = (DR_intorel *) self;
2877         HeapTuple       tuple;
2878
2879         /*
2880          * get the heap tuple out of the tuple table slot, making sure we have a
2881          * writable copy
2882          */
2883         tuple = ExecMaterializeSlot(slot);
2884
2885         heap_insert(myState->rel,
2886                                 tuple,
2887                                 myState->estate->es_output_cid,
2888                                 myState->hi_options,
2889                                 myState->bistate);
2890
2891         /* We know this is a newly created relation, so there are no indexes */
2892
2893         IncrAppended();
2894 }
2895
2896 /*
2897  * intorel_shutdown --- executor end
2898  */
2899 static void
2900 intorel_shutdown(DestReceiver *self)
2901 {
2902         /* no-op */
2903 }
2904
2905 /*
2906  * intorel_destroy --- release DestReceiver object
2907  */
2908 static void
2909 intorel_destroy(DestReceiver *self)
2910 {
2911         pfree(self);
2912 }