/*-------------------------------------------------------------------------
 *
 * execMain.c
 *        top level executor interface routines
 *
 * INTERFACE ROUTINES
 *      ExecutorStart()
 *      ExecutorRun()
 *      ExecutorEnd()
 *
 *      The old ExecutorMain() has been replaced by ExecutorStart(),
 *      ExecutorRun() and ExecutorEnd()
 *
 *      These three procedures are the external interfaces to the executor.
 *      In each case, the query descriptor is required as an argument.
 *
 *      ExecutorStart() must be called at the beginning of execution of any
 *      query plan and ExecutorEnd() should always be called at the end of
 *      execution of a plan.
 *
 *      ExecutorRun accepts direction and count arguments that specify whether
 *      the plan is to be executed forwards or backwards, and for how many
 *      tuples.
 *
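 *      A typical caller invokes these in sequence (a minimal sketch; the
 *      real driver logic lives in the traffic cop and portal code):
 *
 *              ExecutorStart(queryDesc, 0);
 *              ExecutorRun(queryDesc, ForwardScanDirection, 0L);
 *              ExecutorEnd(queryDesc);
 *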
 * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *        $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.351 2010/07/12 17:01:05 tgl Exp $
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/reloptions.h"
#include "access/sysattr.h"
#include "access/transam.h"
#include "access/xact.h"
#include "catalog/heap.h"
#include "catalog/namespace.h"
#include "catalog/toasting.h"
#include "commands/tablespace.h"
#include "commands/trigger.h"
#include "executor/execdebug.h"
#include "executor/instrument.h"
#include "miscadmin.h"
#include "optimizer/clauses.h"
#include "parser/parse_clause.h"
#include "parser/parsetree.h"
#include "storage/bufmgr.h"
#include "storage/lmgr.h"
#include "storage/smgr.h"
#include "tcop/utility.h"
#include "utils/acl.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/snapmgr.h"
#include "utils/tqual.h"


/* Hooks for plugins to get control in ExecutorStart/Run/End() */
ExecutorStart_hook_type ExecutorStart_hook = NULL;
ExecutorRun_hook_type ExecutorRun_hook = NULL;
ExecutorEnd_hook_type ExecutorEnd_hook = NULL;

/* Hook for plugin to get control in ExecCheckRTPerms() */
ExecutorCheckPerms_hook_type ExecutorCheckPerms_hook = NULL;
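
/*
 * For illustration, a loadable plugin would typically install one of these
 * hooks from its _PG_init() function, saving any previous value so it can
 * chain to it.  A minimal sketch (hypothetical extension code, not part of
 * this file):
 *
 *      static ExecutorStart_hook_type prev_ExecutorStart = NULL;
 *
 *      static void
 *      my_ExecutorStart(QueryDesc *queryDesc, int eflags)
 *      {
 *              ... plugin-specific work here ...
 *              if (prev_ExecutorStart)
 *                      prev_ExecutorStart(queryDesc, eflags);
 *              else
 *                      standard_ExecutorStart(queryDesc, eflags);
 *      }
 *
 *      void
 *      _PG_init(void)
 *      {
 *              prev_ExecutorStart = ExecutorStart_hook;
 *              ExecutorStart_hook = my_ExecutorStart;
 *      }
 */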

/* decls for local routines only used within this module */
static void InitPlan(QueryDesc *queryDesc, int eflags);
static void ExecEndPlan(PlanState *planstate, EState *estate);
static void ExecutePlan(EState *estate, PlanState *planstate,
                        CmdType operation,
                        bool sendTuples,
                        long numberTuples,
                        ScanDirection direction,
                        DestReceiver *dest);
static void ExecCheckRTPerms(List *rangeTable);
static void ExecCheckRTEPerms(RangeTblEntry *rte);
static void ExecCheckXactReadOnly(PlannedStmt *plannedstmt);
static void EvalPlanQualStart(EPQState *epqstate, EState *parentestate,
                                  Plan *planTree);
static void OpenIntoRel(QueryDesc *queryDesc);
static void CloseIntoRel(QueryDesc *queryDesc);
static void intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo);
static void intorel_receive(TupleTableSlot *slot, DestReceiver *self);
static void intorel_shutdown(DestReceiver *self);
static void intorel_destroy(DestReceiver *self);

/* end of local decls */


/* ----------------------------------------------------------------
 *              ExecutorStart
 *
 *              This routine must be called at the beginning of any execution of any
 *              query plan
 *
 * Takes a QueryDesc previously created by CreateQueryDesc (it's not really
 * clear why we bother to separate the two functions, but...).  The tupDesc
 * field of the QueryDesc is filled in to describe the tuples that will be
 * returned, and the internal fields (estate and planstate) are set up.
 *
 * eflags contains flag bits as described in executor.h.
 *
 * NB: the CurrentMemoryContext when this is called will become the parent
 * of the per-query context used for this Executor invocation.
 *
 * We provide a function hook variable that lets loadable plugins
 * get control when ExecutorStart is called.  Such a plugin would
 * normally call standard_ExecutorStart().
 *
 * ----------------------------------------------------------------
 */
void
ExecutorStart(QueryDesc *queryDesc, int eflags)
{
        if (ExecutorStart_hook)
                (*ExecutorStart_hook) (queryDesc, eflags);
        else
                standard_ExecutorStart(queryDesc, eflags);
}

void
standard_ExecutorStart(QueryDesc *queryDesc, int eflags)
{
        EState     *estate;
        MemoryContext oldcontext;

        /* sanity checks: queryDesc must not be started already */
        Assert(queryDesc != NULL);
        Assert(queryDesc->estate == NULL);

        /*
         * If the transaction is read-only, we need to check if any writes are
         * planned to non-temporary tables.  EXPLAIN is considered read-only.
         */
        if (XactReadOnly && !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
                ExecCheckXactReadOnly(queryDesc->plannedstmt);

        /*
         * Build EState, switch into per-query memory context for startup.
         */
        estate = CreateExecutorState();
        queryDesc->estate = estate;

        oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

        /*
         * Fill in external parameters, if any, from queryDesc; and allocate
         * workspace for internal parameters
         */
        estate->es_param_list_info = queryDesc->params;

        if (queryDesc->plannedstmt->nParamExec > 0)
                estate->es_param_exec_vals = (ParamExecData *)
                        palloc0(queryDesc->plannedstmt->nParamExec * sizeof(ParamExecData));

        /*
         * If non-read-only query, set the command ID to mark output tuples with
         */
        switch (queryDesc->operation)
        {
                case CMD_SELECT:
                        /* SELECT INTO and SELECT FOR UPDATE/SHARE need to mark tuples */
                        if (queryDesc->plannedstmt->intoClause != NULL ||
                                queryDesc->plannedstmt->rowMarks != NIL)
                                estate->es_output_cid = GetCurrentCommandId(true);
                        break;

                case CMD_INSERT:
                case CMD_DELETE:
                case CMD_UPDATE:
                        estate->es_output_cid = GetCurrentCommandId(true);
                        break;

                default:
                        elog(ERROR, "unrecognized operation code: %d",
                                 (int) queryDesc->operation);
                        break;
        }

        /*
         * Copy other important information into the EState
         */
        estate->es_snapshot = RegisterSnapshot(queryDesc->snapshot);
        estate->es_crosscheck_snapshot = RegisterSnapshot(queryDesc->crosscheck_snapshot);
        estate->es_instrument = queryDesc->instrument_options;

        /*
         * Initialize the plan state tree
         */
        InitPlan(queryDesc, eflags);

        MemoryContextSwitchTo(oldcontext);
}

/* ----------------------------------------------------------------
 *              ExecutorRun
 *
 *              This is the main routine of the executor module. It accepts
 *              the query descriptor from the traffic cop and executes the
 *              query plan.
 *
 *              ExecutorStart must have been called already.
 *
 *              If direction is NoMovementScanDirection then nothing is done
 *              except to start up/shut down the destination.  Otherwise,
 *              we retrieve up to 'count' tuples in the specified direction.
 *
 *              Note: count = 0 is interpreted as no portal limit, i.e., run to
 *              completion.
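 *
 *              For example, a cursor fetch of ten rows would be executed as
 *              ExecutorRun(queryDesc, ForwardScanDirection, 10L), while a plain
 *              SELECT runs to completion with count = 0.  (A sketch of typical
 *              usage; the actual calls are made from the portal code.)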
 *
 *              There is no return value, but output tuples (if any) are sent to
 *              the destination receiver specified in the QueryDesc; and the number
 *              of tuples processed at the top level can be found in
 *              estate->es_processed.
 *
 *              We provide a function hook variable that lets loadable plugins
 *              get control when ExecutorRun is called.  Such a plugin would
 *              normally call standard_ExecutorRun().
 *
 * ----------------------------------------------------------------
 */
void
ExecutorRun(QueryDesc *queryDesc,
                        ScanDirection direction, long count)
{
        if (ExecutorRun_hook)
                (*ExecutorRun_hook) (queryDesc, direction, count);
        else
                standard_ExecutorRun(queryDesc, direction, count);
}

void
standard_ExecutorRun(QueryDesc *queryDesc,
                                         ScanDirection direction, long count)
{
        EState     *estate;
        CmdType         operation;
        DestReceiver *dest;
        bool            sendTuples;
        MemoryContext oldcontext;

        /* sanity checks */
        Assert(queryDesc != NULL);

        estate = queryDesc->estate;

        Assert(estate != NULL);

        /*
         * Switch into per-query memory context
         */
        oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

        /* Allow instrumentation of ExecutorRun overall runtime */
        if (queryDesc->totaltime)
                InstrStartNode(queryDesc->totaltime);

        /*
         * extract information from the query descriptor
         */
        operation = queryDesc->operation;
        dest = queryDesc->dest;

        /*
         * startup tuple receiver, if we will be emitting tuples
         */
        estate->es_processed = 0;
        estate->es_lastoid = InvalidOid;

        sendTuples = (operation == CMD_SELECT ||
                                  queryDesc->plannedstmt->hasReturning);

        if (sendTuples)
                (*dest->rStartup) (dest, operation, queryDesc->tupDesc);

        /*
         * run plan
         */
        if (!ScanDirectionIsNoMovement(direction))
                ExecutePlan(estate,
                                        queryDesc->planstate,
                                        operation,
                                        sendTuples,
                                        count,
                                        direction,
                                        dest);

        /*
         * shutdown tuple receiver, if we started it
         */
        if (sendTuples)
                (*dest->rShutdown) (dest);

        if (queryDesc->totaltime)
                InstrStopNode(queryDesc->totaltime, estate->es_processed);

        MemoryContextSwitchTo(oldcontext);
}

/* ----------------------------------------------------------------
 *              ExecutorEnd
 *
 *              This routine must be called at the end of execution of any
 *              query plan
 *
 *              We provide a function hook variable that lets loadable plugins
 *              get control when ExecutorEnd is called.  Such a plugin would
 *              normally call standard_ExecutorEnd().
 *
 * ----------------------------------------------------------------
 */
void
ExecutorEnd(QueryDesc *queryDesc)
{
        if (ExecutorEnd_hook)
                (*ExecutorEnd_hook) (queryDesc);
        else
                standard_ExecutorEnd(queryDesc);
}

void
standard_ExecutorEnd(QueryDesc *queryDesc)
{
        EState     *estate;
        MemoryContext oldcontext;

        /* sanity checks */
        Assert(queryDesc != NULL);

        estate = queryDesc->estate;

        Assert(estate != NULL);

        /*
         * Switch into per-query memory context to run ExecEndPlan
         */
        oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

        ExecEndPlan(queryDesc->planstate, estate);

        /*
         * Close the SELECT INTO relation if any
         */
        if (estate->es_select_into)
                CloseIntoRel(queryDesc);

        /* do away with our snapshots */
        UnregisterSnapshot(estate->es_snapshot);
        UnregisterSnapshot(estate->es_crosscheck_snapshot);

        /*
         * Must switch out of context before destroying it
         */
        MemoryContextSwitchTo(oldcontext);

        /*
         * Release EState and per-query memory context.  This should release
         * everything the executor has allocated.
         */
        FreeExecutorState(estate);

        /* Reset queryDesc fields that no longer point to anything */
        queryDesc->tupDesc = NULL;
        queryDesc->estate = NULL;
        queryDesc->planstate = NULL;
        queryDesc->totaltime = NULL;
}

/* ----------------------------------------------------------------
 *              ExecutorRewind
 *
 *              This routine may be called on an open queryDesc to rewind it
 *              to the start.
 * ----------------------------------------------------------------
 */
void
ExecutorRewind(QueryDesc *queryDesc)
{
        EState     *estate;
        MemoryContext oldcontext;

        /* sanity checks */
        Assert(queryDesc != NULL);

        estate = queryDesc->estate;

        Assert(estate != NULL);

        /* It's probably not sensible to rescan updating queries */
        Assert(queryDesc->operation == CMD_SELECT);

        /*
         * Switch into per-query memory context
         */
        oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

        /*
         * rescan plan
         */
        ExecReScan(queryDesc->planstate);

        MemoryContextSwitchTo(oldcontext);
}


/*
 * ExecCheckRTPerms
 *              Check access permissions for all relations listed in a range table.
 */
static void
ExecCheckRTPerms(List *rangeTable)
{
        ListCell   *l;

        foreach(l, rangeTable)
        {
                ExecCheckRTEPerms((RangeTblEntry *) lfirst(l));
        }

        if (ExecutorCheckPerms_hook)
                (*ExecutorCheckPerms_hook)(rangeTable);
}

/*
 * ExecCheckRTEPerms
 *              Check access permissions for a single RTE.
 */
static void
ExecCheckRTEPerms(RangeTblEntry *rte)
{
        AclMode         requiredPerms;
        AclMode         relPerms;
        AclMode         remainingPerms;
        Oid                     relOid;
        Oid                     userid;
        Bitmapset  *tmpset;
        int                     col;

        /*
         * Only plain-relation RTEs need to be checked here.  Function RTEs are
         * checked by init_fcache when the function is prepared for execution.
         * Join, subquery, and special RTEs need no checks.
         */
        if (rte->rtekind != RTE_RELATION)
                return;

        /*
         * No work if requiredPerms is empty.
         */
        requiredPerms = rte->requiredPerms;
        if (requiredPerms == 0)
                return;

        relOid = rte->relid;

        /*
         * userid to check as: current user unless we have a setuid indication.
         *
         * Note: GetUserId() is presently fast enough that there's no harm in
         * calling it separately for each RTE.  If that stops being true, we could
         * call it once in ExecCheckRTPerms and pass the userid down from there.
         * But for now, no need for the extra clutter.
         */
        userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();

        /*
         * We must have *all* the requiredPerms bits, but some of the bits can be
         * satisfied from column-level rather than relation-level permissions.
         * First, remove any bits that are satisfied by relation permissions.
         */
        relPerms = pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL);
        remainingPerms = requiredPerms & ~relPerms;
        if (remainingPerms != 0)
        {
                /*
                 * If we lack any permissions that exist only as relation permissions,
                 * we can fail straight away.
                 */
                if (remainingPerms & ~(ACL_SELECT | ACL_INSERT | ACL_UPDATE))
                        aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
                                                   get_rel_name(relOid));

                /*
                 * Check to see if we have the needed privileges at column level.
                 *
                 * Note: failures just report a table-level error; it would be nicer
                 * to report a column-level error if we have some but not all of the
                 * column privileges.
                 */
                if (remainingPerms & ACL_SELECT)
                {
                        /*
                         * When the query doesn't explicitly reference any columns (for
                         * example, SELECT COUNT(*) FROM table), allow the query if we
                         * have SELECT on any column of the rel, as per SQL spec.
                         */
                        if (bms_is_empty(rte->selectedCols))
                        {
                                if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
                                                                                          ACLMASK_ANY) != ACLCHECK_OK)
                                        aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
                                                                   get_rel_name(relOid));
                        }

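                        /*
                         * Note: column numbers in selectedCols are offset by
                         * FirstLowInvalidHeapAttributeNumber, so that system attributes
                         * (which have negative attnums) and whole-row references
                         * (attnum 0) can be stored in a bitmapset, which holds only
                         * nonnegative members.
                         */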
                        tmpset = bms_copy(rte->selectedCols);
                        while ((col = bms_first_member(tmpset)) >= 0)
                        {
                                /* remove the column number offset */
                                col += FirstLowInvalidHeapAttributeNumber;
                                if (col == InvalidAttrNumber)
                                {
                                        /* Whole-row reference, must have priv on all cols */
                                        if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
                                                                                                  ACLMASK_ALL) != ACLCHECK_OK)
                                                aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
                                                                           get_rel_name(relOid));
                                }
                                else
                                {
                                        if (pg_attribute_aclcheck(relOid, col, userid, ACL_SELECT)
                                                != ACLCHECK_OK)
                                                aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
                                                                           get_rel_name(relOid));
                                }
                        }
                        bms_free(tmpset);
                }

                /*
                 * Basically the same for the mod columns, with either INSERT or
                 * UPDATE privilege as specified by remainingPerms.
                 */
                remainingPerms &= ~ACL_SELECT;
                if (remainingPerms != 0)
                {
                        /*
                         * When the query doesn't explicitly change any columns, allow the
                         * query if we have permission on any column of the rel.  This is
                         * to handle SELECT FOR UPDATE as well as possible corner cases in
                         * INSERT and UPDATE.
                         */
                        if (bms_is_empty(rte->modifiedCols))
                        {
                                if (pg_attribute_aclcheck_all(relOid, userid, remainingPerms,
                                                                                          ACLMASK_ANY) != ACLCHECK_OK)
                                        aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
                                                                   get_rel_name(relOid));
                        }

                        tmpset = bms_copy(rte->modifiedCols);
                        while ((col = bms_first_member(tmpset)) >= 0)
                        {
                                /* remove the column number offset */
                                col += FirstLowInvalidHeapAttributeNumber;
                                if (col == InvalidAttrNumber)
                                {
                                        /* whole-row reference can't happen here */
                                        elog(ERROR, "whole-row update is not implemented");
                                }
                                else
                                {
                                        if (pg_attribute_aclcheck(relOid, col, userid, remainingPerms)
                                                != ACLCHECK_OK)
                                                aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
                                                                           get_rel_name(relOid));
                                }
                        }
                        bms_free(tmpset);
                }
        }
}

/*
 * Check that the query does not imply any writes to non-temp tables.
 *
 * Note: in a Hot Standby slave this would need to reject writes to temp
 * tables as well; but an HS slave can't have created any temp tables
 * in the first place, so no need to check that.
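 *
 * For example, under "SET transaction_read_only = on" an INSERT into an
 * ordinary table is rejected here, while an INSERT into a temporary table
 * is allowed to proceed.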
 */
static void
ExecCheckXactReadOnly(PlannedStmt *plannedstmt)
{
        ListCell   *l;

        /*
         * CREATE TABLE AS or SELECT INTO?
         *
         * XXX should we allow this if the destination is temp?  Considering that
         * it would still require catalog changes, probably not.
         */
        if (plannedstmt->intoClause != NULL)
                PreventCommandIfReadOnly(CreateCommandTag((Node *) plannedstmt));

        /* Fail if write permissions are requested on any non-temp table */
        foreach(l, plannedstmt->rtable)
        {
                RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);

                if (rte->rtekind != RTE_RELATION)
                        continue;

                if ((rte->requiredPerms & (~ACL_SELECT)) == 0)
                        continue;

                if (isTempNamespace(get_rel_namespace(rte->relid)))
                        continue;

                PreventCommandIfReadOnly(CreateCommandTag((Node *) plannedstmt));
        }
}


/* ----------------------------------------------------------------
 *              InitPlan
 *
 *              Initializes the query plan: open files, allocate storage
 *              and start up the rule manager
 * ----------------------------------------------------------------
 */
static void
InitPlan(QueryDesc *queryDesc, int eflags)
{
        CmdType         operation = queryDesc->operation;
        PlannedStmt *plannedstmt = queryDesc->plannedstmt;
        Plan       *plan = plannedstmt->planTree;
        List       *rangeTable = plannedstmt->rtable;
        EState     *estate = queryDesc->estate;
        PlanState  *planstate;
        TupleDesc       tupType;
        ListCell   *l;
        int                     i;

        /*
         * Do permissions checks
         */
        ExecCheckRTPerms(rangeTable);

        /*
         * initialize the node's execution state
         */
        estate->es_range_table = rangeTable;
        estate->es_plannedstmt = plannedstmt;

        /*
         * initialize result relation stuff, and open/lock the result rels.
         *
         * We must do this before initializing the plan tree, else we might try to
         * do a lock upgrade if a result rel is also a source rel.
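         * (For example, in a self-referencing "UPDATE t ... FROM t ..." the
         * same relation is both source and target; opening it here with
         * RowExclusiveLock first avoids upgrading from a weaker scan lock.)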
         */
        if (plannedstmt->resultRelations)
        {
                List       *resultRelations = plannedstmt->resultRelations;
                int                     numResultRelations = list_length(resultRelations);
                ResultRelInfo *resultRelInfos;
                ResultRelInfo *resultRelInfo;

                resultRelInfos = (ResultRelInfo *)
                        palloc(numResultRelations * sizeof(ResultRelInfo));
                resultRelInfo = resultRelInfos;
                foreach(l, resultRelations)
                {
                        Index           resultRelationIndex = lfirst_int(l);
                        Oid                     resultRelationOid;
                        Relation        resultRelation;

                        resultRelationOid = getrelid(resultRelationIndex, rangeTable);
                        resultRelation = heap_open(resultRelationOid, RowExclusiveLock);
                        InitResultRelInfo(resultRelInfo,
                                                          resultRelation,
                                                          resultRelationIndex,
                                                          operation,
                                                          estate->es_instrument);
                        resultRelInfo++;
                }
                estate->es_result_relations = resultRelInfos;
                estate->es_num_result_relations = numResultRelations;
                /* es_result_relation_info is NULL except when within ModifyTable */
                estate->es_result_relation_info = NULL;
        }
        else
        {
                /*
                 * if no result relation, then set state appropriately
                 */
                estate->es_result_relations = NULL;
                estate->es_num_result_relations = 0;
                estate->es_result_relation_info = NULL;
        }

        /*
         * Similarly, we have to lock relations selected FOR UPDATE/FOR SHARE
         * before we initialize the plan tree, else we'd be risking lock upgrades.
         * While we are at it, build the ExecRowMark list.
         */
        estate->es_rowMarks = NIL;
        foreach(l, plannedstmt->rowMarks)
        {
                PlanRowMark *rc = (PlanRowMark *) lfirst(l);
                Oid                     relid;
                Relation        relation;
                ExecRowMark *erm;

                /* ignore "parent" rowmarks; they are irrelevant at runtime */
                if (rc->isParent)
                        continue;

                switch (rc->markType)
                {
                        case ROW_MARK_EXCLUSIVE:
                        case ROW_MARK_SHARE:
                                relid = getrelid(rc->rti, rangeTable);
                                relation = heap_open(relid, RowShareLock);
                                break;
                        case ROW_MARK_REFERENCE:
                                relid = getrelid(rc->rti, rangeTable);
                                relation = heap_open(relid, AccessShareLock);
                                break;
                        case ROW_MARK_COPY:
                                /* there's no real table here ... */
                                relation = NULL;
                                break;
                        default:
                                elog(ERROR, "unrecognized markType: %d", rc->markType);
                                relation = NULL;        /* keep compiler quiet */
                                break;
                }

                erm = (ExecRowMark *) palloc(sizeof(ExecRowMark));
                erm->relation = relation;
                erm->rti = rc->rti;
                erm->prti = rc->prti;
                erm->markType = rc->markType;
                erm->noWait = rc->noWait;
                erm->ctidAttNo = rc->ctidAttNo;
                erm->toidAttNo = rc->toidAttNo;
                erm->wholeAttNo = rc->wholeAttNo;
                ItemPointerSetInvalid(&(erm->curCtid));
                estate->es_rowMarks = lappend(estate->es_rowMarks, erm);
        }

        /*
         * Detect whether we're doing SELECT INTO.  If so, set the es_into_oids
         * flag appropriately so that the plan tree will be initialized with the
         * correct tuple descriptors.  (Other SELECT INTO stuff comes later.)
         */
        estate->es_select_into = false;
        if (operation == CMD_SELECT && plannedstmt->intoClause != NULL)
        {
                estate->es_select_into = true;
                estate->es_into_oids = interpretOidsOption(plannedstmt->intoClause->options);
        }

        /*
         * Initialize the executor's tuple table to empty.
         */
        estate->es_tupleTable = NIL;
        estate->es_trig_tuple_slot = NULL;
        estate->es_trig_oldtup_slot = NULL;

        /* mark EvalPlanQual not active */
        estate->es_epqTuple = NULL;
        estate->es_epqTupleSet = NULL;
        estate->es_epqScanDone = NULL;

        /*
         * Initialize private state information for each SubPlan.  We must do this
         * before running ExecInitNode on the main query tree, since
         * ExecInitSubPlan expects to be able to find these entries.
         */
        Assert(estate->es_subplanstates == NIL);
        i = 1;                                          /* subplan indices count from 1 */
        foreach(l, plannedstmt->subplans)
        {
                Plan       *subplan = (Plan *) lfirst(l);
                PlanState  *subplanstate;
                int                     sp_eflags;

                /*
                 * A subplan will never need to do BACKWARD scan or MARK/RESTORE.  If
                 * it is a parameterless subplan (not initplan), we suggest that it be
                 * prepared to handle REWIND efficiently; otherwise there is no need.
                 */
                sp_eflags = eflags & EXEC_FLAG_EXPLAIN_ONLY;
                if (bms_is_member(i, plannedstmt->rewindPlanIDs))
                        sp_eflags |= EXEC_FLAG_REWIND;

                subplanstate = ExecInitNode(subplan, estate, sp_eflags);

                estate->es_subplanstates = lappend(estate->es_subplanstates,
                                                                                   subplanstate);

                i++;
        }

        /*
         * Initialize the private state information for all the nodes in the query
         * tree.  This opens files, allocates storage and leaves us ready to start
         * processing tuples.
         */
        planstate = ExecInitNode(plan, estate, eflags);

        /*
         * Get the tuple descriptor describing the type of tuples to return. (this
         * is especially important if we are creating a relation with "SELECT
         * INTO")
         */
        tupType = ExecGetResultType(planstate);

        /*
         * Initialize the junk filter if needed.  SELECT queries need a filter if
         * there are any junk attrs in the top-level tlist.
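         *
         * (For example, "SELECT a FROM t ORDER BY b" carries b as a junk
         * attribute for the sort; the filter strips it before the tuple is
         * returned to the client.)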
         */
        if (operation == CMD_SELECT)
        {
                bool            junk_filter_needed = false;
                ListCell   *tlist;

                foreach(tlist, plan->targetlist)
                {
                        TargetEntry *tle = (TargetEntry *) lfirst(tlist);

                        if (tle->resjunk)
                        {
                                junk_filter_needed = true;
                                break;
                        }
                }

                if (junk_filter_needed)
                {
                        JunkFilter *j;

                        j = ExecInitJunkFilter(planstate->plan->targetlist,
                                                                   tupType->tdhasoid,
                                                                   ExecInitExtraTupleSlot(estate));
                        estate->es_junkFilter = j;

                        /* Want to return the cleaned tuple type */
                        tupType = j->jf_cleanTupType;
                }
        }

        queryDesc->tupDesc = tupType;
        queryDesc->planstate = planstate;

        /*
         * If doing SELECT INTO, initialize the "into" relation.  We must wait
         * till now so we have the "clean" result tuple type to create the new
         * table from.
         *
         * If EXPLAIN, skip creating the "into" relation.
         */
        if (estate->es_select_into && !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
                OpenIntoRel(queryDesc);
}

/*
 * Initialize ResultRelInfo data for one result relation
 */
void
InitResultRelInfo(ResultRelInfo *resultRelInfo,
                                  Relation resultRelationDesc,
                                  Index resultRelationIndex,
                                  CmdType operation,
                                  int instrument_options)
{
        /*
         * Check valid relkind ... parser and/or planner should have noticed this
         * already, but let's make sure.
         */
        switch (resultRelationDesc->rd_rel->relkind)
        {
                case RELKIND_RELATION:
                        /* OK */
                        break;
                case RELKIND_SEQUENCE:
                        ereport(ERROR,
                                        (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                                         errmsg("cannot change sequence \"%s\"",
                                                        RelationGetRelationName(resultRelationDesc))));
                        break;
                case RELKIND_TOASTVALUE:
                        ereport(ERROR,
                                        (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                                         errmsg("cannot change TOAST relation \"%s\"",
                                                        RelationGetRelationName(resultRelationDesc))));
                        break;
                case RELKIND_VIEW:
                        ereport(ERROR,
                                        (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                                         errmsg("cannot change view \"%s\"",
                                                        RelationGetRelationName(resultRelationDesc))));
                        break;
                default:
                        ereport(ERROR,
                                        (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                                         errmsg("cannot change relation \"%s\"",
                                                        RelationGetRelationName(resultRelationDesc))));
                        break;
        }

        /* OK, fill in the node */
        MemSet(resultRelInfo, 0, sizeof(ResultRelInfo));
        resultRelInfo->type = T_ResultRelInfo;
        resultRelInfo->ri_RangeTableIndex = resultRelationIndex;
        resultRelInfo->ri_RelationDesc = resultRelationDesc;
        resultRelInfo->ri_NumIndices = 0;
        resultRelInfo->ri_IndexRelationDescs = NULL;
        resultRelInfo->ri_IndexRelationInfo = NULL;
        /* make a copy so as not to depend on relcache info not changing... */
        resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc);
        if (resultRelInfo->ri_TrigDesc)
        {
                int                     n = resultRelInfo->ri_TrigDesc->numtriggers;

                resultRelInfo->ri_TrigFunctions = (FmgrInfo *)
                        palloc0(n * sizeof(FmgrInfo));
                resultRelInfo->ri_TrigWhenExprs = (List **)
                        palloc0(n * sizeof(List *));
                if (instrument_options)
                        resultRelInfo->ri_TrigInstrument = InstrAlloc(n, instrument_options);
        }
        else
        {
                resultRelInfo->ri_TrigFunctions = NULL;
                resultRelInfo->ri_TrigWhenExprs = NULL;
                resultRelInfo->ri_TrigInstrument = NULL;
        }
        resultRelInfo->ri_ConstraintExprs = NULL;
        resultRelInfo->ri_junkFilter = NULL;
        resultRelInfo->ri_projectReturning = NULL;

        /*
         * If there are indices on the result relation, open them and save
         * descriptors in the result relation info, so that we can add new index
         * entries for the tuples we add/update.  We need not do this for a
         * DELETE, however, since deletion doesn't affect indexes.
         */
        if (resultRelationDesc->rd_rel->relhasindex &&
                operation != CMD_DELETE)
                ExecOpenIndices(resultRelInfo);
}

/*
 *              ExecGetTriggerResultRel
 *
 * Get a ResultRelInfo for a trigger target relation.  Most of the time,
 * triggers are fired on one of the result relations of the query, and so
 * we can just return a member of the es_result_relations array.  (Note: in
 * self-join situations there might be multiple members with the same OID;
 * if so it doesn't matter which one we pick.)  However, it is sometimes
 * necessary to fire triggers on other relations; this happens mainly when an
 * RI update trigger queues additional triggers on other relations, which will
 * be processed in the context of the outer query.  For efficiency's sake,
 * we want to have a ResultRelInfo for those triggers too; that can avoid
 * repeated re-opening of the relation.  (It also provides a way for EXPLAIN
 * ANALYZE to report the runtimes of such triggers.)  So we make additional
 * ResultRelInfo's as needed, and save them in es_trig_target_relations.
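 *
 * (For example, an ON DELETE CASCADE foreign-key constraint queues triggers
 * on the referencing table, which need not appear in the outer query.)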
 */
ResultRelInfo *
ExecGetTriggerResultRel(EState *estate, Oid relid)
{
        ResultRelInfo *rInfo;
        int                     nr;
        ListCell   *l;
        Relation        rel;
        MemoryContext oldcontext;

        /* First, search through the query result relations */
        rInfo = estate->es_result_relations;
        nr = estate->es_num_result_relations;
        while (nr > 0)
        {
                if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
                        return rInfo;
                rInfo++;
                nr--;
        }
        /* Nope, but maybe we already made an extra ResultRelInfo for it */
        foreach(l, estate->es_trig_target_relations)
        {
                rInfo = (ResultRelInfo *) lfirst(l);
                if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
                        return rInfo;
        }
        /* Nope, so we need a new one */

        /*
         * Open the target relation's relcache entry.  We assume that an
         * appropriate lock is still held by the backend from whenever the trigger
         * event got queued, so we need take no new lock here.
         */
        rel = heap_open(relid, NoLock);

        /*
         * Make the new entry in the right context.  Currently, we don't need any
         * index information in ResultRelInfos used only for triggers, so tell
         * InitResultRelInfo it's a DELETE.
         */
        oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
        rInfo = makeNode(ResultRelInfo);
        InitResultRelInfo(rInfo,
                                          rel,
                                          0,            /* dummy rangetable index */
                                          CMD_DELETE,
                                          estate->es_instrument);
        estate->es_trig_target_relations =
                lappend(estate->es_trig_target_relations, rInfo);
        MemoryContextSwitchTo(oldcontext);

        return rInfo;
}

/*
 *              ExecContextForcesOids
 *
 * This is pretty grotty: when doing INSERT, UPDATE, or SELECT INTO,
 * we need to ensure that result tuples have space for an OID iff they are
 * going to be stored into a relation that has OIDs.  In other contexts
 * we are free to choose whether to leave space for OIDs in result tuples
 * (we generally don't want to, but we do if a physical-tlist optimization
 * is possible).  This routine checks the plan context and returns TRUE if the
 * choice is forced, FALSE if the choice is not forced.  In the TRUE case,
 * *hasoids is set to the required value.
 *
 * One reason this is ugly is that all plan nodes in the plan tree will emit
 * tuples with space for an OID, though we really only need the topmost node
 * to do so.  However, node types like Sort don't project new tuples but just
 * return their inputs, and in those cases the requirement propagates down
 * to the input node.  Eventually we might make this code smart enough to
 * recognize how far down the requirement really goes, but for now we just
 * make all plan nodes do the same thing if the top level forces the choice.
 *
 * We assume that if we are generating tuples for INSERT or UPDATE,
 * estate->es_result_relation_info is already set up to describe the target
 * relation.  Note that in an UPDATE that spans an inheritance tree, some of
 * the target relations may have OIDs and some not.  We have to make the
 * decisions on a per-relation basis as we initialize each of the subplans of
 * the ModifyTable node, so ModifyTable has to set es_result_relation_info
 * while initializing each subplan.
 *
 * SELECT INTO is even uglier, because we don't have the INTO relation's
 * descriptor available when this code runs; we have to look aside at a
 * flag set by InitPlan().
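 *
 * For example, an INSERT into a table created WITH OIDS forces *hasoids =
 * true for the plan tree feeding the ModifyTable node, so result tuples
 * are formed with room for an OID from the start.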
 */
bool
ExecContextForcesOids(PlanState *planstate, bool *hasoids)
{
        ResultRelInfo *ri = planstate->state->es_result_relation_info;

        if (ri != NULL)
        {
                Relation        rel = ri->ri_RelationDesc;

                if (rel != NULL)
                {
                        *hasoids = rel->rd_rel->relhasoids;
                        return true;
                }
        }

        if (planstate->state->es_select_into)
        {
                *hasoids = planstate->state->es_into_oids;
                return true;
        }

        return false;
}

/* ----------------------------------------------------------------
 *              ExecEndPlan
 *
 *              Cleans up the query plan -- closes files and frees up storage
 *
 * NOTE: we are no longer very worried about freeing storage per se
 * in this code; FreeExecutorState should be guaranteed to release all
 * memory that needs to be released.  What we are worried about doing
 * is closing relations and dropping buffer pins.  Thus, for example,
 * tuple tables must be cleared or dropped to ensure pins are released.
 * ----------------------------------------------------------------
 */
static void
ExecEndPlan(PlanState *planstate, EState *estate)
{
        ResultRelInfo *resultRelInfo;
        int                     i;
        ListCell   *l;

        /*
         * shut down the node-type-specific query processing
         */
        ExecEndNode(planstate);

        /*
         * for subplans too
         */
        foreach(l, estate->es_subplanstates)
        {
                PlanState  *subplanstate = (PlanState *) lfirst(l);

                ExecEndNode(subplanstate);
        }

        /*
         * destroy the executor's tuple table.  Actually we only care about
         * releasing buffer pins and tupdesc refcounts; there's no need to pfree
         * the TupleTableSlots, since the containing memory context is about to go
         * away anyway.
         */
        ExecResetTupleTable(estate->es_tupleTable, false);

        /*
         * close the result relation(s) if any, but hold locks until xact commit.
         */
        resultRelInfo = estate->es_result_relations;
        for (i = estate->es_num_result_relations; i > 0; i--)
        {
                /* Close indices and then the relation itself */
                ExecCloseIndices(resultRelInfo);
                heap_close(resultRelInfo->ri_RelationDesc, NoLock);
                resultRelInfo++;
        }

        /*
         * likewise close any trigger target relations
         */
        foreach(l, estate->es_trig_target_relations)
        {
                resultRelInfo = (ResultRelInfo *) lfirst(l);
                /* Close indices and then the relation itself */
                ExecCloseIndices(resultRelInfo);
                heap_close(resultRelInfo->ri_RelationDesc, NoLock);
        }

        /*
         * close any relations selected FOR UPDATE/FOR SHARE, again keeping locks
         */
        foreach(l, estate->es_rowMarks)
        {
                ExecRowMark *erm = (ExecRowMark *) lfirst(l);

                if (erm->relation)
                        heap_close(erm->relation, NoLock);
        }
}

/* ----------------------------------------------------------------
 *              ExecutePlan
 *
 *              Processes the query plan until we have processed 'numberTuples' tuples,
 *              moving in the specified direction.
 *
 *              Runs to completion if numberTuples is 0
 *
 * Note: the ctid attribute is a 'junk' attribute that is removed before the
 * user can see it
 * ----------------------------------------------------------------
 */
static void
ExecutePlan(EState *estate,
                        PlanState *planstate,
                        CmdType operation,
                        bool sendTuples,
                        long numberTuples,
                        ScanDirection direction,
                        DestReceiver *dest)
{
        TupleTableSlot *slot;
        long            current_tuple_count;

        /*
         * initialize local variables
         */
        current_tuple_count = 0;

        /*
         * Set the direction.
         */
        estate->es_direction = direction;

        /*
         * Loop until we've processed the proper number of tuples from the plan.
         */
        for (;;)
        {
                /* Reset the per-output-tuple exprcontext */
                ResetPerTupleExprContext(estate);

                /*
                 * Execute the plan and obtain a tuple
                 */
                slot = ExecProcNode(planstate);

                /*
                 * if the tuple is null, then we assume there is nothing more to
                 * process so we just end the loop...
                 */
                if (TupIsNull(slot))
                        break;

                /*
                 * If we have a junk filter, then project a new tuple with the junk
                 * removed.
                 *
                 * Store this new "clean" tuple in the junkfilter's resultSlot.
                 * (Formerly, we stored it back over the "dirty" tuple, which is WRONG
                 * because that tuple slot has the wrong descriptor.)
                 */
                if (estate->es_junkFilter != NULL)
                        slot = ExecFilterJunk(estate->es_junkFilter, slot);

                /*
                 * If we are supposed to send the tuple somewhere, do so. (In
                 * practice, this is probably always the case at this point.)
                 */
                if (sendTuples)
                        (*dest->receiveSlot) (slot, dest);

                /*
                 * Count tuples processed, if this is a SELECT.  (For other operation
                 * types, the ModifyTable plan node must count the appropriate
                 * events.)
                 */
                if (operation == CMD_SELECT)
                        (estate->es_processed)++;

                /*
                 * check our tuple count... if we've processed the proper number then
                 * quit, else loop again and process more tuples.  Zero numberTuples
                 * means no limit.
                 */
                current_tuple_count++;
                if (numberTuples && numberTuples == current_tuple_count)
                        break;
        }
}


/*
 * ExecRelCheck --- check that tuple meets constraints for result relation
 */
static const char *
ExecRelCheck(ResultRelInfo *resultRelInfo,
                         TupleTableSlot *slot, EState *estate)
{
        Relation        rel = resultRelInfo->ri_RelationDesc;
        int                     ncheck = rel->rd_att->constr->num_check;
        ConstrCheck *check = rel->rd_att->constr->check;
        ExprContext *econtext;
        MemoryContext oldContext;
        List       *qual;
        int                     i;

        /*
         * If first time through for this result relation, build expression
         * nodetrees for rel's constraint expressions.  Keep them in the per-query
         * memory context so they'll survive throughout the query.
         */
        if (resultRelInfo->ri_ConstraintExprs == NULL)
        {
                oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
                resultRelInfo->ri_ConstraintExprs =
                        (List **) palloc(ncheck * sizeof(List *));
                for (i = 0; i < ncheck; i++)
                {
                        /* ExecQual wants implicit-AND form */
                        qual = make_ands_implicit(stringToNode(check[i].ccbin));
                        resultRelInfo->ri_ConstraintExprs[i] = (List *)
                                ExecPrepareExpr((Expr *) qual, estate);
                }
                MemoryContextSwitchTo(oldContext);
        }

        /*
         * We will use the EState's per-tuple context for evaluating constraint
         * expressions (creating it if it's not already there).
         */
        econtext = GetPerTupleExprContext(estate);

        /* Arrange for econtext's scan tuple to be the tuple under test */
        econtext->ecxt_scantuple = slot;

        /* And evaluate the constraints */
        for (i = 0; i < ncheck; i++)
        {
                qual = resultRelInfo->ri_ConstraintExprs[i];

                /*
                 * NOTE: SQL92 specifies that a NULL result from a constraint
                 * expression is not to be treated as a failure.  Therefore, tell
                 * ExecQual to return TRUE for NULL.
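                 *
                 * For example, a row with a NULL "price" passes CHECK (price > 0),
                 * since the expression yields NULL rather than false.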
1296                  */
1297                 if (!ExecQual(qual, econtext, true))
1298                         return check[i].ccname;
1299         }
1300
1301         /* NULL result means no error */
1302         return NULL;
1303 }
1304
1305 void
1306 ExecConstraints(ResultRelInfo *resultRelInfo,
1307                                 TupleTableSlot *slot, EState *estate)
1308 {
1309         Relation        rel = resultRelInfo->ri_RelationDesc;
1310         TupleConstr *constr = rel->rd_att->constr;
1311
1312         Assert(constr);
1313
1314         if (constr->has_not_null)
1315         {
1316                 int                     natts = rel->rd_att->natts;
1317                 int                     attrChk;
1318
1319                 for (attrChk = 1; attrChk <= natts; attrChk++)
1320                 {
1321                         if (rel->rd_att->attrs[attrChk - 1]->attnotnull &&
1322                                 slot_attisnull(slot, attrChk))
1323                                 ereport(ERROR,
1324                                                 (errcode(ERRCODE_NOT_NULL_VIOLATION),
1325                                                  errmsg("null value in column \"%s\" violates not-null constraint",
1326                                                 NameStr(rel->rd_att->attrs[attrChk - 1]->attname))));
1327                 }
1328         }
1329
1330         if (constr->num_check > 0)
1331         {
1332                 const char *failed;
1333
1334                 if ((failed = ExecRelCheck(resultRelInfo, slot, estate)) != NULL)
1335                         ereport(ERROR,
1336                                         (errcode(ERRCODE_CHECK_VIOLATION),
1337                                          errmsg("new row for relation \"%s\" violates check constraint \"%s\"",
1338                                                         RelationGetRelationName(rel), failed)));
1339         }
1340 }
1341
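/*
 * For illustration, the two error paths above correspond to sessions
 * like the following (output paraphrased, assuming the default
 * system-generated constraint name):
 *
 *              CREATE TABLE t (a int NOT NULL, b int CHECK (b > 0));
 *              INSERT INTO t VALUES (NULL, 1);
 *              ERROR:  null value in column "a" violates not-null constraint
 *              INSERT INTO t VALUES (1, -1);
 *              ERROR:  new row for relation "t" violates check constraint "t_b_check"
 */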
1342
1343 /*
1344  * EvalPlanQual logic --- recheck modified tuple(s) to see if we want to
1345  * process the updated version under READ COMMITTED rules.
1346  *
1347  * See backend/executor/README for some info about how this works.
1348  */
1349
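/*
 * A concrete (hypothetical) scenario: two sessions concurrently run
 *
 *              UPDATE accounts SET balance = balance - 100 WHERE id = 1;
 *
 * under READ COMMITTED.  The second session blocks on the row lock; once
 * the first commits, the version the second session scanned is outdated,
 * so EvalPlanQual chases the update chain to the newest version,
 * re-evaluates the query's quals against it, and only if they still pass
 * is the second UPDATE applied to that new version.
 */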
1350
1351 /*
1352  * Check a modified tuple to see if we want to process its updated version
1353  * under READ COMMITTED rules.
1354  *
1355  *      estate - outer executor state data
1356  *      epqstate - state for EvalPlanQual rechecking
1357  *      relation - table containing tuple
1358  *      rti - rangetable index of table containing tuple
1359  *      *tid - t_ctid from the outdated tuple (ie, next updated version)
1360  *      priorXmax - t_xmax from the outdated tuple
1361  *
1362  * *tid is also an output parameter: it's modified to hold the TID of the
1363  * latest version of the tuple (note this may be changed even on failure)
1364  *
1365  * Returns a slot containing the new candidate update/delete tuple, or
1366  * NULL if we determine we shouldn't process the row.
1367  */
1368 TupleTableSlot *
1369 EvalPlanQual(EState *estate, EPQState *epqstate,
1370                          Relation relation, Index rti,
1371                          ItemPointer tid, TransactionId priorXmax)
1372 {
1373         TupleTableSlot *slot;
1374         HeapTuple       copyTuple;
1375
1376         Assert(rti > 0);
1377
1378         /*
1379          * Get and lock the updated version of the row; if that fails, return NULL.
1380          */
1381         copyTuple = EvalPlanQualFetch(estate, relation, LockTupleExclusive,
1382                                                                   tid, priorXmax);
1383
1384         if (copyTuple == NULL)
1385                 return NULL;
1386
1387         /*
1388          * For UPDATE/DELETE we have to return the TID of the actual row we're
1389          * executing EPQ for.
1390          */
1391         *tid = copyTuple->t_self;
1392
1393         /*
1394          * Need to run a recheck subquery.  Initialize or reinitialize EPQ state.
1395          */
1396         EvalPlanQualBegin(epqstate, estate);
1397
1398         /*
1399          * Free old test tuple, if any, and store new tuple where relation's scan
1400          * node will see it
1401          */
1402         EvalPlanQualSetTuple(epqstate, rti, copyTuple);
1403
1404         /*
1405          * Fetch any non-locked source rows
1406          */
1407         EvalPlanQualFetchRowMarks(epqstate);
1408
1409         /*
1410          * Run the EPQ query.  We assume it will return at most one tuple.
1411          */
1412         slot = EvalPlanQualNext(epqstate);
1413
1414         /*
1415          * If we got a tuple, force the slot to materialize the tuple so that it
1416          * is not dependent on any local state in the EPQ query (in particular,
1417          * the slot quite likely holds references to pass-by-ref datums in
1418          * copyTuple).  As with the next step, this is to guard against early
1419          * re-use of the EPQ query.
1420          */
1421         if (!TupIsNull(slot))
1422                 (void) ExecMaterializeSlot(slot);
1423
1424         /*
1425          * Clear out the test tuple.  This is needed in case the EPQ query is
1426          * re-used to test a tuple for a different relation.  (Not clear that can
1427          * really happen, but let's be safe.)
1428          */
1429         EvalPlanQualSetTuple(epqstate, rti, NULL);
1430
1431         return slot;
1432 }
1433
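/*
 * A minimal caller sketch (compiled out), in the spirit of ExecUpdate()
 * in nodeModifyTable.c: when heap_update() reports HeapTupleUpdated
 * under READ COMMITTED, the candidate row is rechecked before retrying.
 * The variable names are hypothetical.
 */
#ifdef NOT_USED
        {
                TupleTableSlot *epqslot;

                epqslot = EvalPlanQual(estate, epqstate,
                                                           resultRelationDesc,
                                                           resultRelInfo->ri_RangeTableIndex,
                                                           &update_ctid, update_xmax);
                if (!TupIsNull(epqslot))
                {
                        /* row still satisfies the quals: retry on the new version */
                        *tupleid = update_ctid;
                        /* ... loop back and redo the update ... */
                }
                /* else the updated row no longer passes the quals; skip it */
        }
#endif   /* NOT_USED */
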
1434 /*
1435  * Fetch a copy of the newest version of an outdated tuple
1436  *
1437  *      estate - executor state data
1438  *      relation - table containing tuple
1439  *      lockmode - requested tuple lock mode
1440  *      *tid - t_ctid from the outdated tuple (ie, next updated version)
1441  *      priorXmax - t_xmax from the outdated tuple
1442  *
1443  * Returns a palloc'd copy of the newest tuple version, or NULL if we find
1444  * that there is no newest version (ie, the row was deleted, not updated).
1445  * If successful, we have locked the newest tuple version, so caller does not
1446  * need to worry about it changing anymore.
1447  *
1448  * Note: properly, lockmode should be declared as enum LockTupleMode,
1449  * but we use "int" to avoid having to include heapam.h in executor.h.
1450  */
1451 HeapTuple
1452 EvalPlanQualFetch(EState *estate, Relation relation, int lockmode,
1453                                   ItemPointer tid, TransactionId priorXmax)
1454 {
1455         HeapTuple       copyTuple = NULL;
1456         HeapTupleData tuple;
1457         SnapshotData SnapshotDirty;
1458
1459         /*
1460          * fetch target tuple
1461          *
1462          * Loop here to deal with updated or busy tuples
1463          */
1464         InitDirtySnapshot(SnapshotDirty);
1465         tuple.t_self = *tid;
1466         for (;;)
1467         {
1468                 Buffer          buffer;
1469
1470                 if (heap_fetch(relation, &SnapshotDirty, &tuple, &buffer, true, NULL))
1471                 {
1472                         HTSU_Result test;
1473                         ItemPointerData update_ctid;
1474                         TransactionId update_xmax;
1475
1476                         /*
1477                          * If xmin isn't what we're expecting, the slot must have been
1478                          * recycled and reused for an unrelated tuple.  This implies that
1479                          * the latest version of the row was deleted, so we need do
1480                          * nothing.  (Should be safe to examine xmin without getting
1481                          * buffer's content lock, since xmin never changes in an existing
1482                          * tuple.)
1483                          */
1484                         if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
1485                                                                          priorXmax))
1486                         {
1487                                 ReleaseBuffer(buffer);
1488                                 return NULL;
1489                         }
1490
1491                         /* otherwise xmin should not be dirty... */
1492                         if (TransactionIdIsValid(SnapshotDirty.xmin))
1493                                 elog(ERROR, "t_xmin is uncommitted in tuple to be updated");
1494
1495                         /*
1496                          * If the tuple is being updated by another transaction, we have
1497                          * to wait for it to commit or abort.
1498                          */
1499                         if (TransactionIdIsValid(SnapshotDirty.xmax))
1500                         {
1501                                 ReleaseBuffer(buffer);
1502                                 XactLockTableWait(SnapshotDirty.xmax);
1503                                 continue;               /* loop back to repeat heap_fetch */
1504                         }
1505
1506                         /*
1507                          * If tuple was inserted by our own transaction, we have to check
1508                          * cmin against es_output_cid: cmin >= current CID means our
1509                          * command cannot see the tuple, so we should ignore it.  Without
1510                          * this we are open to the "Halloween problem" of indefinitely
1511                          * re-updating the same tuple. (We need not check cmax because
1512                          * HeapTupleSatisfiesDirty will consider a tuple deleted by our
1513                          * transaction dead, regardless of cmax.)  We just checked that
1514                          * priorXmax == xmin, so we can test that variable instead of
1515                          * doing HeapTupleHeaderGetXmin again.
1516                          */
1517                         if (TransactionIdIsCurrentTransactionId(priorXmax) &&
1518                                 HeapTupleHeaderGetCmin(tuple.t_data) >= estate->es_output_cid)
1519                         {
1520                                 ReleaseBuffer(buffer);
1521                                 return NULL;
1522                         }
1523
1524                         /*
1525                          * This is a live tuple, so now try to lock it.
1526                          */
1527                         test = heap_lock_tuple(relation, &tuple, &buffer,
1528                                                                    &update_ctid, &update_xmax,
1529                                                                    estate->es_output_cid,
1530                                                                    lockmode, false);
1531                         /* We now have two pins on the buffer, get rid of one */
1532                         ReleaseBuffer(buffer);
1533
1534                         switch (test)
1535                         {
1536                                 case HeapTupleSelfUpdated:
1537                                         /* treat it as deleted; do not process */
1538                                         ReleaseBuffer(buffer);
1539                                         return NULL;
1540
1541                                 case HeapTupleMayBeUpdated:
1542                                         /* successfully locked */
1543                                         break;
1544
1545                                 case HeapTupleUpdated:
1546                                         ReleaseBuffer(buffer);
1547                                         if (IsXactIsoLevelSerializable)
1548                                                 ereport(ERROR,
1549                                                                 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1550                                                                  errmsg("could not serialize access due to concurrent update")));
1551                                         if (!ItemPointerEquals(&update_ctid, &tuple.t_self))
1552                                         {
1553                                                 /* it was updated, so look at the updated version */
1554                                                 tuple.t_self = update_ctid;
1555                                                 /* updated row should have xmin matching this xmax */
1556                                                 priorXmax = update_xmax;
1557                                                 continue;
1558                                         }
1559                                         /* tuple was deleted, so give up */
1560                                         return NULL;
1561
1562                                 default:
1563                                         ReleaseBuffer(buffer);
1564                                         elog(ERROR, "unrecognized heap_lock_tuple status: %u",
1565                                                  test);
1566                                         return NULL;    /* keep compiler quiet */
1567                         }
1568
1569                         /*
1570                          * We got the tuple; now copy it for use by the recheck query.
1571                          */
1572                         copyTuple = heap_copytuple(&tuple);
1573                         ReleaseBuffer(buffer);
1574                         break;
1575                 }
1576
1577                 /*
1578                  * If the referenced slot was actually empty, the latest version of
1579                  * the row must have been deleted, so we need do nothing.
1580                  */
1581                 if (tuple.t_data == NULL)
1582                 {
1583                         ReleaseBuffer(buffer);
1584                         return NULL;
1585                 }
1586
1587                 /*
1588                  * As above, if xmin isn't what we're expecting, do nothing.
1589                  */
1590                 if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
1591                                                                  priorXmax))
1592                 {
1593                         ReleaseBuffer(buffer);
1594                         return NULL;
1595                 }
1596
1597                 /*
1598                  * If we get here, the tuple was found but failed SnapshotDirty.
1599                  * Assuming the xmin is either a committed xact or our own xact (as it
1600                  * certainly should be if we're trying to modify the tuple), this must
1601                  * mean that the row was updated or deleted by either a committed xact
1602                  * or our own xact.  If it was deleted, we can ignore it; if it was
1603                  * updated then chain up to the next version and repeat the whole
1604                  * process.
1605                  *
1606                  * As above, it should be safe to examine xmax and t_ctid without the
1607                  * buffer content lock, because they can't be changing.
1608                  */
1609                 if (ItemPointerEquals(&tuple.t_self, &tuple.t_data->t_ctid))
1610                 {
1611                         /* deleted, so forget about it */
1612                         ReleaseBuffer(buffer);
1613                         return NULL;
1614                 }
1615
1616                 /* updated, so look at the updated row */
1617                 tuple.t_self = tuple.t_data->t_ctid;
1618                 /* updated row should have xmin matching this xmax */
1619                 priorXmax = HeapTupleHeaderGetXmax(tuple.t_data);
1620                 ReleaseBuffer(buffer);
1621                 /* loop back to fetch next in chain */
1622         }
1623
1624         /*
1625          * Return the copied tuple
1626          */
1627         return copyTuple;
1628 }
1629
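/*
 * Update-chain walking in brief: each updated tuple's t_ctid points at
 * its successor version, and a successor's xmin equals its predecessor's
 * xmax, so the loop above follows
 *
 *              v1 (xmin=X0, xmax=X1) -> v2 (xmin=X1, xmax=X2) -> v3 ...
 *
 * re-verifying the xmin/xmax linkage at each hop to detect a TID that
 * was recycled for some unrelated tuple.
 */
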
1630 /*
1631  * EvalPlanQualInit -- initialize during creation of a plan state node
1632  * that might need to invoke EPQ processing.
1633  * Note: subplan can be NULL if it will be set later with EvalPlanQualSetPlan.
1634  */
1635 void
1636 EvalPlanQualInit(EPQState *epqstate, EState *estate,
1637                                  Plan *subplan, int epqParam)
1638 {
1639         /* Mark the EPQ state inactive */
1640         epqstate->estate = NULL;
1641         epqstate->planstate = NULL;
1642         epqstate->origslot = NULL;
1643         /* ... and remember data that EvalPlanQualBegin will need */
1644         epqstate->plan = subplan;
1645         epqstate->rowMarks = NIL;
1646         epqstate->epqParam = epqParam;
1647 }
1648
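/*
 * (In the current tree the callers are the ModifyTable and LockRows node
 * initializers, each passing along the epqParam the planner assigned to
 * that node.)
 */
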
1649 /*
1650  * EvalPlanQualSetPlan -- set or change subplan of an EPQState.
1651  *
1652  * We need this so that ModifyTable can deal with multiple subplans.
1653  */
1654 void
1655 EvalPlanQualSetPlan(EPQState *epqstate, Plan *subplan)
1656 {
1657         /* If we have a live EPQ query, shut it down */
1658         EvalPlanQualEnd(epqstate);
1659         /* And set/change the plan pointer */
1660         epqstate->plan = subplan;
1661 }
1662
1663 /*
1664  * EvalPlanQualAddRowMark -- add an ExecRowMark that EPQ needs to handle.
1665  *
1666  * Currently, only non-locking RowMarks are supported.
1667  */
1668 void
1669 EvalPlanQualAddRowMark(EPQState *epqstate, ExecRowMark *erm)
1670 {
1671         if (RowMarkRequiresRowShareLock(erm->markType))
1672                 elog(ERROR, "EvalPlanQual doesn't support locking rowmarks");
1673         epqstate->rowMarks = lappend(epqstate->rowMarks, erm);
1674 }
1675
1676 /*
1677  * Install one test tuple into EPQ state, or clear test tuple if tuple == NULL
1678  *
1679  * NB: passed tuple must be palloc'd; it may get freed later
1680  */
1681 void
1682 EvalPlanQualSetTuple(EPQState *epqstate, Index rti, HeapTuple tuple)
1683 {
1684         EState     *estate = epqstate->estate;
1685
1686         Assert(rti > 0);
1687
1688         /*
1689          * free old test tuple, if any, and store new tuple where relation's scan
1690          * node will see it
1691          */
1692         if (estate->es_epqTuple[rti - 1] != NULL)
1693                 heap_freetuple(estate->es_epqTuple[rti - 1]);
1694         estate->es_epqTuple[rti - 1] = tuple;
1695         estate->es_epqTupleSet[rti - 1] = true;
1696 }
1697
1698 /*
1699  * Fetch back the current test tuple (if any) for the specified RTI
1700  */
1701 HeapTuple
1702 EvalPlanQualGetTuple(EPQState *epqstate, Index rti)
1703 {
1704         EState     *estate = epqstate->estate;
1705
1706         Assert(rti > 0);
1707
1708         return estate->es_epqTuple[rti - 1];
1709 }
1710
1711 /*
1712  * Fetch the current row values for any non-locked relations that need
1713  * to be scanned by an EvalPlanQual operation.  origslot must have been set
1714  * to contain the current result row (top-level row) that we need to recheck.
1715  */
1716 void
1717 EvalPlanQualFetchRowMarks(EPQState *epqstate)
1718 {
1719         ListCell   *l;
1720
1721         Assert(epqstate->origslot != NULL);
1722
1723         foreach(l, epqstate->rowMarks)
1724         {
1725                 ExecRowMark *erm = (ExecRowMark *) lfirst(l);
1726                 Datum           datum;
1727                 bool            isNull;
1728                 HeapTupleData tuple;
1729
1730                 /* clear any leftover test tuple for this rel */
1731                 EvalPlanQualSetTuple(epqstate, erm->rti, NULL);
1732
1733                 if (erm->relation)
1734                 {
1735                         Buffer          buffer;
1736
1737                         Assert(erm->markType == ROW_MARK_REFERENCE);
1738
1739                         /* if child rel, must check whether it produced this row */
1740                         if (erm->rti != erm->prti)
1741                         {
1742                                 Oid                     tableoid;
1743
1744                                 datum = ExecGetJunkAttribute(epqstate->origslot,
1745                                                                                          erm->toidAttNo,
1746                                                                                          &isNull);
1747                                 /* non-locked rels could be on the inside of outer joins */
1748                                 if (isNull)
1749                                         continue;
1750                                 tableoid = DatumGetObjectId(datum);
1751
1752                                 if (tableoid != RelationGetRelid(erm->relation))
1753                                 {
1754                                         /* this child is inactive right now */
1755                                         continue;
1756                                 }
1757                         }
1758
1759                         /* fetch the tuple's ctid */
1760                         datum = ExecGetJunkAttribute(epqstate->origslot,
1761                                                                                  erm->ctidAttNo,
1762                                                                                  &isNull);
1763                         /* non-locked rels could be on the inside of outer joins */
1764                         if (isNull)
1765                                 continue;
1766                         tuple.t_self = *((ItemPointer) DatumGetPointer(datum));
1767
1768                         /* okay, fetch the tuple */
1769                         if (!heap_fetch(erm->relation, SnapshotAny, &tuple, &buffer,
1770                                                         false, NULL))
1771                                 elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");
1772
1773                         /* successful, copy and store tuple */
1774                         EvalPlanQualSetTuple(epqstate, erm->rti,
1775                                                                  heap_copytuple(&tuple));
1776                         ReleaseBuffer(buffer);
1777                 }
1778                 else
1779                 {
1780                         HeapTupleHeader td;
1781
1782                         Assert(erm->markType == ROW_MARK_COPY);
1783
1784                         /* fetch the whole-row Var for the relation */
1785                         datum = ExecGetJunkAttribute(epqstate->origslot,
1786                                                                                  erm->wholeAttNo,
1787                                                                                  &isNull);
1788                         /* non-locked rels could be on the inside of outer joins */
1789                         if (isNull)
1790                                 continue;
1791                         td = DatumGetHeapTupleHeader(datum);
1792
1793                         /* build a temporary HeapTuple control structure */
1794                         tuple.t_len = HeapTupleHeaderGetDatumLength(td);
1795                         ItemPointerSetInvalid(&(tuple.t_self));
1796                         tuple.t_tableOid = InvalidOid;
1797                         tuple.t_data = td;
1798
1799                         /* copy and store tuple */
1800                         EvalPlanQualSetTuple(epqstate, erm->rti,
1801                                                                  heap_copytuple(&tuple));
1802                 }
1803         }
1804 }
1805
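/*
 * The junk attributes consulted above (resolved into erm->ctidAttNo,
 * erm->toidAttNo, and erm->wholeAttNo at executor startup) are resjunk
 * columns the planner appends to the top-level targetlist for each row
 * mark, precisely so that this recheck can re-locate or reconstruct each
 * non-locked source row.
 */
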
1806 /*
1807  * Fetch the next row (if any) from EvalPlanQual testing
1808  *
1809  * (In practice, there should never be more than one row...)
1810  */
1811 TupleTableSlot *
1812 EvalPlanQualNext(EPQState *epqstate)
1813 {
1814         MemoryContext oldcontext;
1815         TupleTableSlot *slot;
1816
1817         oldcontext = MemoryContextSwitchTo(epqstate->estate->es_query_cxt);
1818         slot = ExecProcNode(epqstate->planstate);
1819         MemoryContextSwitchTo(oldcontext);
1820
1821         return slot;
1822 }
1823
1824 /*
1825  * Initialize or reset an EvalPlanQual state tree
1826  */
1827 void
1828 EvalPlanQualBegin(EPQState *epqstate, EState *parentestate)
1829 {
1830         EState     *estate = epqstate->estate;
1831
1832         if (estate == NULL)
1833         {
1834                 /* First time through, so create a child EState */
1835                 EvalPlanQualStart(epqstate, parentestate, epqstate->plan);
1836         }
1837         else
1838         {
1839                 /*
1840                  * We already have a suitable child EPQ tree, so just reset it.
1841                  */
1842                 int                     rtsize = list_length(parentestate->es_range_table);
1843                 PlanState  *planstate = epqstate->planstate;
1844
1845                 MemSet(estate->es_epqScanDone, 0, rtsize * sizeof(bool));
1846
1847                 /* Recopy current values of parent parameters */
1848                 if (parentestate->es_plannedstmt->nParamExec > 0)
1849                 {
1850                         int                     i = parentestate->es_plannedstmt->nParamExec;
1851
1852                         while (--i >= 0)
1853                         {
1854                                 /* copy value if any, but not execPlan link */
1855                                 estate->es_param_exec_vals[i].value =
1856                                         parentestate->es_param_exec_vals[i].value;
1857                                 estate->es_param_exec_vals[i].isnull =
1858                                         parentestate->es_param_exec_vals[i].isnull;
1859                         }
1860                 }
1861
1862                 /*
1863                  * Mark child plan tree as needing rescan at all scan nodes.  The
1864                  * first ExecProcNode will take care of actually doing the rescan.
1865                  */
1866                 planstate->chgParam = bms_add_member(planstate->chgParam,
1867                                                                                          epqstate->epqParam);
1868         }
1869 }
1870
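/*
 * (The chgParam signalling above relies on the executor convention that
 * a node with a nonempty chgParam set is rescanned before it returns any
 * more tuples; the changed-parameter set propagates down the tree, so
 * scan nodes that depend on epqParam restart and pick up the newly
 * installed test tuple.)
 */
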
1871 /*
1872  * Start execution of an EvalPlanQual plan tree.
1873  *
1874  * This is a cut-down version of ExecutorStart(): we copy some state from
1875  * the top-level estate rather than initializing it fresh.
1876  */
1877 static void
1878 EvalPlanQualStart(EPQState *epqstate, EState *parentestate, Plan *planTree)
1879 {
1880         EState     *estate;
1881         int                     rtsize;
1882         MemoryContext oldcontext;
1883         ListCell   *l;
1884
1885         rtsize = list_length(parentestate->es_range_table);
1886
1887         epqstate->estate = estate = CreateExecutorState();
1888
1889         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
1890
1891         /*
1892          * Child EPQ EStates share the parent's copy of unchanging state such as
1893          * the snapshot, rangetable, result-rel info, and external Param info.
1894          * They need their own copies of local state, including a tuple table,
1895          * es_param_exec_vals, etc.
1896          */
1897         estate->es_direction = ForwardScanDirection;
1898         estate->es_snapshot = parentestate->es_snapshot;
1899         estate->es_crosscheck_snapshot = parentestate->es_crosscheck_snapshot;
1900         estate->es_range_table = parentestate->es_range_table;
1901         estate->es_plannedstmt = parentestate->es_plannedstmt;
1902         estate->es_junkFilter = parentestate->es_junkFilter;
1903         estate->es_output_cid = parentestate->es_output_cid;
1904         estate->es_result_relations = parentestate->es_result_relations;
1905         estate->es_num_result_relations = parentestate->es_num_result_relations;
1906         estate->es_result_relation_info = parentestate->es_result_relation_info;
1907         /* es_trig_target_relations must NOT be copied */
1908         estate->es_rowMarks = parentestate->es_rowMarks;
1909         estate->es_instrument = parentestate->es_instrument;
1910         estate->es_select_into = parentestate->es_select_into;
1911         estate->es_into_oids = parentestate->es_into_oids;
1912
1913         /*
1914          * The external param list is simply shared from parent.  The internal
1915          * param workspace has to be local state, but we copy the initial values
1916          * from the parent, so as to have access to any param values that were
1917          * already set from other parts of the parent's plan tree.
1918          */
1919         estate->es_param_list_info = parentestate->es_param_list_info;
1920         if (parentestate->es_plannedstmt->nParamExec > 0)
1921         {
1922                 int                     i = parentestate->es_plannedstmt->nParamExec;
1923
1924                 estate->es_param_exec_vals = (ParamExecData *)
1925                         palloc0(i * sizeof(ParamExecData));
1926                 while (--i >= 0)
1927                 {
1928                         /* copy value if any, but not execPlan link */
1929                         estate->es_param_exec_vals[i].value =
1930                                 parentestate->es_param_exec_vals[i].value;
1931                         estate->es_param_exec_vals[i].isnull =
1932                                 parentestate->es_param_exec_vals[i].isnull;
1933                 }
1934         }
1935
1936         /*
1937          * Each EState must have its own es_epqScanDone state, but if we have
1938          * nested EPQ checks, they should share es_epqTuple arrays.  This allows
1939          * sub-rechecks to inherit the values being examined by an outer recheck.
1940          */
1941         estate->es_epqScanDone = (bool *) palloc0(rtsize * sizeof(bool));
1942         if (parentestate->es_epqTuple != NULL)
1943         {
1944                 estate->es_epqTuple = parentestate->es_epqTuple;
1945                 estate->es_epqTupleSet = parentestate->es_epqTupleSet;
1946         }
1947         else
1948         {
1949                 estate->es_epqTuple = (HeapTuple *)
1950                         palloc0(rtsize * sizeof(HeapTuple));
1951                 estate->es_epqTupleSet = (bool *)
1952                         palloc0(rtsize * sizeof(bool));
1953         }
1954
1955         /*
1956          * Each estate also has its own tuple table.
1957          */
1958         estate->es_tupleTable = NIL;
1959
1960         /*
1961          * Initialize private state information for each SubPlan.  We must do this
1962          * before running ExecInitNode on the main query tree, since
1963          * ExecInitSubPlan expects to be able to find these entries. Some of the
1964          * SubPlans might not be used in the part of the plan tree we intend to
1965          * run, but since it's not easy to tell which, we just initialize them
1966          * all.
1967          */
1968         Assert(estate->es_subplanstates == NIL);
1969         foreach(l, parentestate->es_plannedstmt->subplans)
1970         {
1971                 Plan       *subplan = (Plan *) lfirst(l);
1972                 PlanState  *subplanstate;
1973
1974                 subplanstate = ExecInitNode(subplan, estate, 0);
1975
1976                 estate->es_subplanstates = lappend(estate->es_subplanstates,
1977                                                                                    subplanstate);
1978         }
1979
1980         /*
1981          * Initialize the private state information for all the nodes in the part
1982          * of the plan tree we need to run.  This opens files, allocates storage
1983          * and leaves us ready to start processing tuples.
1984          */
1985         epqstate->planstate = ExecInitNode(planTree, estate, 0);
1986
1987         MemoryContextSwitchTo(oldcontext);
1988 }
1989
1990 /*
1991  * EvalPlanQualEnd -- shut down at termination of parent plan state node,
1992  * or if we are done with the current EPQ child.
1993  *
1994  * This is a cut-down version of ExecutorEnd(); basically we want to do most
1995  * of the normal cleanup, but *not* close result relations (which we are
1996  * just sharing from the outer query).  We do, however, have to close any
1997  * trigger target relations that got opened, since those are not shared.
1998  * (There probably shouldn't be any of the latter, but just in case...)
1999  */
2000 void
2001 EvalPlanQualEnd(EPQState *epqstate)
2002 {
2003         EState     *estate = epqstate->estate;
2004         MemoryContext oldcontext;
2005         ListCell   *l;
2006
2007         if (estate == NULL)
2008                 return;                                 /* idle, so nothing to do */
2009
2010         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
2011
2012         ExecEndNode(epqstate->planstate);
2013
2014         foreach(l, estate->es_subplanstates)
2015         {
2016                 PlanState  *subplanstate = (PlanState *) lfirst(l);
2017
2018                 ExecEndNode(subplanstate);
2019         }
2020
2021         /* throw away the per-estate tuple table */
2022         ExecResetTupleTable(estate->es_tupleTable, false);
2023
2024         /* close any trigger target relations attached to this EState */
2025         foreach(l, estate->es_trig_target_relations)
2026         {
2027                 ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l);
2028
2029                 /* Close indices and then the relation itself */
2030                 ExecCloseIndices(resultRelInfo);
2031                 heap_close(resultRelInfo->ri_RelationDesc, NoLock);
2032         }
2033
2034         MemoryContextSwitchTo(oldcontext);
2035
2036         FreeExecutorState(estate);
2037
2038         /* Mark EPQState idle */
2039         epqstate->estate = NULL;
2040         epqstate->planstate = NULL;
2041         epqstate->origslot = NULL;
2042 }
2043
2044
2045 /*
2046  * Support for SELECT INTO (a/k/a CREATE TABLE AS)
2047  *
2048  * We implement SELECT INTO by diverting SELECT's normal output with
2049  * a specialized DestReceiver type.
2050  */
2051
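/*
 * For example, a (hypothetical) query such as
 *
 *              SELECT a, b INTO new_table FROM old_table WHERE a > 0;
 *
 * executes the plain SELECT plan, but with queryDesc->dest replaced by
 * the DR_intorel receiver below, so each result row is heap_insert'ed
 * into new_table instead of being sent to the client.
 */
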
2052 typedef struct
2053 {
2054         DestReceiver pub;                       /* publicly-known function pointers */
2055         EState     *estate;                     /* EState we are working with */
2056         Relation        rel;                    /* Relation to write to */
2057         int                     hi_options;             /* heap_insert performance options */
2058         BulkInsertState bistate;        /* bulk insert state */
2059 } DR_intorel;
2060
2061 /*
2062  * OpenIntoRel --- actually create the SELECT INTO target relation
2063  *
2064  * This also replaces QueryDesc->dest with the special DestReceiver for
2065  * SELECT INTO.  We assume that the correct result tuple type has already
2066  * been placed in queryDesc->tupDesc.
2067  */
2068 static void
2069 OpenIntoRel(QueryDesc *queryDesc)
2070 {
2071         IntoClause *into = queryDesc->plannedstmt->intoClause;
2072         EState     *estate = queryDesc->estate;
2073         Relation        intoRelationDesc;
2074         char       *intoName;
2075         Oid                     namespaceId;
2076         Oid                     tablespaceId;
2077         Datum           reloptions;
2078         AclResult       aclresult;
2079         Oid                     intoRelationId;
2080         TupleDesc       tupdesc;
2081         DR_intorel *myState;
2082         static char *validnsps[] = HEAP_RELOPT_NAMESPACES;
2083
2084         Assert(into);
2085
2086         /*
2087          * XXX This code needs to be kept in sync with DefineRelation(). Maybe we
2088          * should try to use that function instead.
2089          */
2090
2091         /*
2092          * Check consistency of arguments
2093          */
2094         if (into->onCommit != ONCOMMIT_NOOP && !into->rel->istemp)
2095                 ereport(ERROR,
2096                                 (errcode(ERRCODE_INVALID_TABLE_DEFINITION),
2097                                  errmsg("ON COMMIT can only be used on temporary tables")));
2098
2099         /*
2100          * Security check: disallow creating temp tables from security-restricted
2101          * code.  This is needed because calling code might not expect untrusted
2102          * tables to appear in pg_temp at the front of its search path.
2103          */
2104         if (into->rel->istemp && InSecurityRestrictedOperation())
2105                 ereport(ERROR,
2106                                 (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
2107                                  errmsg("cannot create temporary table within security-restricted operation")));
2108
2109         /*
2110          * Find namespace to create in, check its permissions
2111          */
2112         intoName = into->rel->relname;
2113         namespaceId = RangeVarGetCreationNamespace(into->rel);
2114
2115         aclresult = pg_namespace_aclcheck(namespaceId, GetUserId(),
2116                                                                           ACL_CREATE);
2117         if (aclresult != ACLCHECK_OK)
2118                 aclcheck_error(aclresult, ACL_KIND_NAMESPACE,
2119                                            get_namespace_name(namespaceId));
2120
2121         /*
2122          * Select tablespace to use.  If not specified, use default tablespace
2123          * (which may in turn default to database's default).
2124          */
2125         if (into->tableSpaceName)
2126         {
2127                 tablespaceId = get_tablespace_oid(into->tableSpaceName);
2128                 if (!OidIsValid(tablespaceId))
2129                         ereport(ERROR,
2130                                         (errcode(ERRCODE_UNDEFINED_OBJECT),
2131                                          errmsg("tablespace \"%s\" does not exist",
2132                                                         into->tableSpaceName)));
2133         }
2134         else
2135         {
2136                 tablespaceId = GetDefaultTablespace(into->rel->istemp);
2137                 /* note InvalidOid is OK in this case */
2138         }
2139
2140         /* Check permissions except when using the database's default space */
2141         if (OidIsValid(tablespaceId) && tablespaceId != MyDatabaseTableSpace)
2142         {
2143                 AclResult       aclresult;
2144
2145                 aclresult = pg_tablespace_aclcheck(tablespaceId, GetUserId(),
2146                                                                                    ACL_CREATE);
2147
2148                 if (aclresult != ACLCHECK_OK)
2149                         aclcheck_error(aclresult, ACL_KIND_TABLESPACE,
2150                                                    get_tablespace_name(tablespaceId));
2151         }
2152
2153         /* Parse and validate any reloptions */
2154         reloptions = transformRelOptions((Datum) 0,
2155                                                                          into->options,
2156                                                                          NULL,
2157                                                                          validnsps,
2158                                                                          true,
2159                                                                          false);
2160         (void) heap_reloptions(RELKIND_RELATION, reloptions, true);
2161
2162         /* Copy the tupdesc because heap_create_with_catalog modifies it */
2163         tupdesc = CreateTupleDescCopy(queryDesc->tupDesc);
2164
2165         /* Now we can actually create the new relation */
2166         intoRelationId = heap_create_with_catalog(intoName,
2167                                                                                           namespaceId,
2168                                                                                           tablespaceId,
2169                                                                                           InvalidOid,
2170                                                                                           InvalidOid,
2171                                                                                           InvalidOid,
2172                                                                                           GetUserId(),
2173                                                                                           tupdesc,
2174                                                                                           NIL,
2175                                                                                           RELKIND_RELATION,
2176                                                                                           false,
2177                                                                                           false,
2178                                                                                           true,
2179                                                                                           0,
2180                                                                                           into->onCommit,
2181                                                                                           reloptions,
2182                                                                                           true,
2183                                                                                           allowSystemTableMods);
2184
2185         FreeTupleDesc(tupdesc);
2186
2187         /*
2188          * Advance command counter so that the newly-created relation's catalog
2189          * tuples will be visible to heap_open.
2190          */
2191         CommandCounterIncrement();
2192
2193         /*
2194          * If necessary, create a TOAST table for the INTO relation. Note that
2195          * AlterTableCreateToastTable ends with CommandCounterIncrement(), so that
2196          * the TOAST table will be visible for insertion.
2197          */
2198         reloptions = transformRelOptions((Datum) 0,
2199                                                                          into->options,
2200                                                                          "toast",
2201                                                                          validnsps,
2202                                                                          true,
2203                                                                          false);
2204
2205         (void) heap_reloptions(RELKIND_TOASTVALUE, reloptions, true);
2206
2207         AlterTableCreateToastTable(intoRelationId, reloptions);
2208
2209         /*
2210          * And open the constructed table for writing.
2211          */
2212         intoRelationDesc = heap_open(intoRelationId, AccessExclusiveLock);
2213
2214         /*
2215          * Now replace the query's DestReceiver with one for SELECT INTO
2216          */
2217         queryDesc->dest = CreateDestReceiver(DestIntoRel);
2218         myState = (DR_intorel *) queryDesc->dest;
2219         Assert(myState->pub.mydest == DestIntoRel);
2220         myState->estate = estate;
2221         myState->rel = intoRelationDesc;
2222
2223         /*
2224          * We can skip WAL-logging the insertions, unless PITR or streaming
2225          * replication is in use. We can skip the FSM in any case.
2226          */
2227         myState->hi_options = HEAP_INSERT_SKIP_FSM |
2228                 (XLogIsNeeded() ? 0 : HEAP_INSERT_SKIP_WAL);
2229         myState->bistate = GetBulkInsertState();
2230
2231         /* Not using WAL requires smgr_targblock be initially invalid */
2232         Assert(RelationGetTargetBlock(intoRelationDesc) == InvalidBlockNumber);
2233 }
2234
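/*
 * Note the pairing with CloseIntoRel() below: whenever
 * HEAP_INSERT_SKIP_WAL is chosen here, the relation must be heap_sync'ed
 * before commit, since crash recovery cannot replay inserts that were
 * never WAL-logged.
 */
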
2235 /*
2236  * CloseIntoRel --- clean up SELECT INTO at ExecutorEnd time
2237  */
2238 static void
2239 CloseIntoRel(QueryDesc *queryDesc)
2240 {
2241         DR_intorel *myState = (DR_intorel *) queryDesc->dest;
2242
2243         /* OpenIntoRel might never have gotten called */
2244         if (myState && myState->pub.mydest == DestIntoRel && myState->rel)
2245         {
2246                 FreeBulkInsertState(myState->bistate);
2247
2248                 /* If we skipped using WAL, must heap_sync before commit */
2249                 if (myState->hi_options & HEAP_INSERT_SKIP_WAL)
2250                         heap_sync(myState->rel);
2251
2252                 /* close rel, but keep lock until commit */
2253                 heap_close(myState->rel, NoLock);
2254
2255                 myState->rel = NULL;
2256         }
2257 }
2258
2259 /*
2260  * CreateIntoRelDestReceiver -- create a suitable DestReceiver object
2261  */
2262 DestReceiver *
2263 CreateIntoRelDestReceiver(void)
2264 {
2265         DR_intorel *self = (DR_intorel *) palloc0(sizeof(DR_intorel));
2266
2267         self->pub.receiveSlot = intorel_receive;
2268         self->pub.rStartup = intorel_startup;
2269         self->pub.rShutdown = intorel_shutdown;
2270         self->pub.rDestroy = intorel_destroy;
2271         self->pub.mydest = DestIntoRel;
2272
2273         /* private fields will be set by OpenIntoRel */
2274
2275         return (DestReceiver *) self;
2276 }
2277
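/*
 * A minimal sketch (compiled out) of the generic DestReceiver lifecycle
 * this object participates in; the executor drives any receiver roughly
 * as follows (tupDesc and slot are assumed to come from the query):
 */
#ifdef NOT_USED
        {
                DestReceiver *dest = CreateIntoRelDestReceiver();

                (*dest->rStartup) (dest, CMD_SELECT, tupDesc);  /* once, at startup */
                (*dest->receiveSlot) (slot, dest);      /* once per result row */
                (*dest->rShutdown) (dest);                      /* once, at end */
                (*dest->rDestroy) (dest);                       /* release the object */
        }
#endif   /* NOT_USED */
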
2278 /*
2279  * intorel_startup --- executor startup
2280  */
2281 static void
2282 intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
2283 {
2284         /* no-op */
2285 }
2286
2287 /*
2288  * intorel_receive --- receive one tuple
2289  */
2290 static void
2291 intorel_receive(TupleTableSlot *slot, DestReceiver *self)
2292 {
2293         DR_intorel *myState = (DR_intorel *) self;
2294         HeapTuple       tuple;
2295
2296         /*
2297          * get the heap tuple out of the tuple table slot, making sure we have a
2298          * writable copy
2299          */
2300         tuple = ExecMaterializeSlot(slot);
2301
2302         /*
2303          * force assignment of new OID (see comments in ExecInsert)
2304          */
2305         if (myState->rel->rd_rel->relhasoids)
2306                 HeapTupleSetOid(tuple, InvalidOid);
2307
2308         heap_insert(myState->rel,
2309                                 tuple,
2310                                 myState->estate->es_output_cid,
2311                                 myState->hi_options,
2312                                 myState->bistate);
2313
2314         /* We know this is a newly created relation, so there are no indexes */
2315 }
2316
2317 /*
2318  * intorel_shutdown --- executor end
2319  */
2320 static void
2321 intorel_shutdown(DestReceiver *self)
2322 {
2323         /* no-op */
2324 }
2325
2326 /*
2327  * intorel_destroy --- release DestReceiver object
2328  */
2329 static void
2330 intorel_destroy(DestReceiver *self)
2331 {
2332         pfree(self);
2333 }