/*-------------------------------------------------------------------------
 *
 * execMain.c
 *	  top level executor interface routines
 *
 * INTERFACE ROUTINES
 *	ExecutorStart()
 *	ExecutorRun()
 *	ExecutorEnd()
 *
 *	The old ExecutorMain() has been replaced by ExecutorStart(),
 *	ExecutorRun() and ExecutorEnd()
 *
 *	These three procedures are the external interfaces to the executor.
 *	In each case, the query descriptor is required as an argument.
 *
 *	ExecutorStart() must be called at the beginning of execution of any
 *	query plan and ExecutorEnd() should always be called at the end of
 *	execution of a plan.
 *
 *	ExecutorRun accepts direction and count arguments that specify whether
 *	the plan is to be executed forwards or backwards, and for how many
 *	tuples.
 *
 * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.354 2010/08/05 14:45:02 rhaas Exp $
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/reloptions.h"
#include "access/sysattr.h"
#include "access/transam.h"
#include "access/xact.h"
#include "catalog/heap.h"
#include "catalog/namespace.h"
#include "catalog/toasting.h"
#include "commands/tablespace.h"
#include "commands/trigger.h"
#include "executor/execdebug.h"
#include "executor/instrument.h"
#include "miscadmin.h"
#include "optimizer/clauses.h"
#include "parser/parse_clause.h"
#include "parser/parsetree.h"
#include "storage/bufmgr.h"
#include "storage/lmgr.h"
#include "storage/smgr.h"
#include "tcop/utility.h"
#include "utils/acl.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/snapmgr.h"
#include "utils/tqual.h"


/* Hooks for plugins to get control in ExecutorStart/Run/End() */
ExecutorStart_hook_type ExecutorStart_hook = NULL;
ExecutorRun_hook_type ExecutorRun_hook = NULL;
ExecutorEnd_hook_type ExecutorEnd_hook = NULL;

/* Hook for plugin to get control in ExecCheckRTPerms() */
ExecutorCheckPerms_hook_type ExecutorCheckPerms_hook = NULL;
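
/*
 * A loadable module would typically install one of these hooks from its
 * _PG_init() function, saving any previously installed hook so it can chain
 * to it.  A minimal module-side sketch (hypothetical names, not part of this
 * file):
 *
 *		static ExecutorStart_hook_type prev_ExecutorStart = NULL;
 *
 *		static void
 *		my_ExecutorStart(QueryDesc *queryDesc, int eflags)
 *		{
 *			... module-specific work here ...
 *			if (prev_ExecutorStart)
 *				prev_ExecutorStart(queryDesc, eflags);
 *			else
 *				standard_ExecutorStart(queryDesc, eflags);
 *		}
 *
 *		void
 *		_PG_init(void)
 *		{
 *			prev_ExecutorStart = ExecutorStart_hook;
 *			ExecutorStart_hook = my_ExecutorStart;
 *		}
 */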

/* decls for local routines only used within this module */
static void InitPlan(QueryDesc *queryDesc, int eflags);
static void ExecEndPlan(PlanState *planstate, EState *estate);
static void ExecutePlan(EState *estate, PlanState *planstate,
			CmdType operation,
			bool sendTuples,
			long numberTuples,
			ScanDirection direction,
			DestReceiver *dest);
static bool ExecCheckRTEPerms(RangeTblEntry *rte);
static void ExecCheckXactReadOnly(PlannedStmt *plannedstmt);
static void EvalPlanQualStart(EPQState *epqstate, EState *parentestate,
				  Plan *planTree);
static void OpenIntoRel(QueryDesc *queryDesc);
static void CloseIntoRel(QueryDesc *queryDesc);
static void intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo);
static void intorel_receive(TupleTableSlot *slot, DestReceiver *self);
static void intorel_shutdown(DestReceiver *self);
static void intorel_destroy(DestReceiver *self);

/* end of local decls */


/* ----------------------------------------------------------------
 *		ExecutorStart
 *
 *		This routine must be called at the beginning of any execution of any
 *		query plan
 *
 * Takes a QueryDesc previously created by CreateQueryDesc (it's not really
 * clear why we bother to separate the two functions, but...).  The tupDesc
 * field of the QueryDesc is filled in to describe the tuples that will be
 * returned, and the internal fields (estate and planstate) are set up.
 *
 * eflags contains flag bits as described in executor.h.
 *
 * NB: the CurrentMemoryContext when this is called will become the parent
 * of the per-query context used for this Executor invocation.
 *
 * We provide a function hook variable that lets loadable plugins
 * get control when ExecutorStart is called.  Such a plugin would
 * normally call standard_ExecutorStart().
 *
 * ----------------------------------------------------------------
 */
void
ExecutorStart(QueryDesc *queryDesc, int eflags)
{
	if (ExecutorStart_hook)
		(*ExecutorStart_hook) (queryDesc, eflags);
	else
		standard_ExecutorStart(queryDesc, eflags);
}

void
standard_ExecutorStart(QueryDesc *queryDesc, int eflags)
{
	EState	   *estate;
	MemoryContext oldcontext;

	/* sanity checks: queryDesc must not be started already */
	Assert(queryDesc != NULL);
	Assert(queryDesc->estate == NULL);

	/*
	 * If the transaction is read-only, we need to check if any writes are
	 * planned to non-temporary tables.  EXPLAIN is considered read-only.
	 */
	if (XactReadOnly && !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
		ExecCheckXactReadOnly(queryDesc->plannedstmt);

	/*
	 * Build EState, switch into per-query memory context for startup.
	 */
	estate = CreateExecutorState();
	queryDesc->estate = estate;

	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	/*
	 * Fill in external parameters, if any, from queryDesc; and allocate
	 * workspace for internal parameters
	 */
	estate->es_param_list_info = queryDesc->params;

	if (queryDesc->plannedstmt->nParamExec > 0)
		estate->es_param_exec_vals = (ParamExecData *)
			palloc0(queryDesc->plannedstmt->nParamExec * sizeof(ParamExecData));

	/*
	 * If non-read-only query, set the command ID to mark output tuples with
	 */
	switch (queryDesc->operation)
	{
		case CMD_SELECT:
			/* SELECT INTO and SELECT FOR UPDATE/SHARE need to mark tuples */
			if (queryDesc->plannedstmt->intoClause != NULL ||
				queryDesc->plannedstmt->rowMarks != NIL)
				estate->es_output_cid = GetCurrentCommandId(true);
			break;

		case CMD_INSERT:
		case CMD_DELETE:
		case CMD_UPDATE:
			estate->es_output_cid = GetCurrentCommandId(true);
			break;

		default:
			elog(ERROR, "unrecognized operation code: %d",
				 (int) queryDesc->operation);
			break;
	}

	/*
	 * Copy other important information into the EState
	 */
	estate->es_snapshot = RegisterSnapshot(queryDesc->snapshot);
	estate->es_crosscheck_snapshot = RegisterSnapshot(queryDesc->crosscheck_snapshot);
	estate->es_instrument = queryDesc->instrument_options;

	/*
	 * Initialize the plan state tree
	 */
	InitPlan(queryDesc, eflags);

	MemoryContextSwitchTo(oldcontext);
}

/* ----------------------------------------------------------------
 *		ExecutorRun
 *
 *		This is the main routine of the executor module.  It accepts
 *		the query descriptor from the traffic cop and executes the
 *		query plan.
 *
 *		ExecutorStart must have been called already.
 *
 *		If direction is NoMovementScanDirection then nothing is done
 *		except to start up/shut down the destination.  Otherwise,
 *		we retrieve up to 'count' tuples in the specified direction.
 *
 *		Note: count = 0 is interpreted as no portal limit, i.e., run to
 *		completion.
 *
 *		There is no return value, but output tuples (if any) are sent to
 *		the destination receiver specified in the QueryDesc; and the number
 *		of tuples processed at the top level can be found in
 *		estate->es_processed.
 *
 *		We provide a function hook variable that lets loadable plugins
 *		get control when ExecutorRun is called.  Such a plugin would
 *		normally call standard_ExecutorRun().
 *
 * ----------------------------------------------------------------
 */
void
ExecutorRun(QueryDesc *queryDesc,
			ScanDirection direction, long count)
{
	if (ExecutorRun_hook)
		(*ExecutorRun_hook) (queryDesc, direction, count);
	else
		standard_ExecutorRun(queryDesc, direction, count);
}

void
standard_ExecutorRun(QueryDesc *queryDesc,
					 ScanDirection direction, long count)
{
	EState	   *estate;
	CmdType		operation;
	DestReceiver *dest;
	bool		sendTuples;
	MemoryContext oldcontext;

	/* sanity checks */
	Assert(queryDesc != NULL);

	estate = queryDesc->estate;

	Assert(estate != NULL);

	/*
	 * Switch into per-query memory context
	 */
	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	/* Allow instrumentation of ExecutorRun overall runtime */
	if (queryDesc->totaltime)
		InstrStartNode(queryDesc->totaltime);

	/*
	 * extract information from the query descriptor
	 */
	operation = queryDesc->operation;
	dest = queryDesc->dest;

	/*
	 * startup tuple receiver, if we will be emitting tuples
	 */
	estate->es_processed = 0;
	estate->es_lastoid = InvalidOid;

	sendTuples = (operation == CMD_SELECT ||
				  queryDesc->plannedstmt->hasReturning);

	if (sendTuples)
		(*dest->rStartup) (dest, operation, queryDesc->tupDesc);

	/*
	 * run plan
	 */
	if (!ScanDirectionIsNoMovement(direction))
		ExecutePlan(estate,
					queryDesc->planstate,
					operation,
					sendTuples,
					count,
					direction,
					dest);

	/*
	 * shutdown tuple receiver, if we started it
	 */
	if (sendTuples)
		(*dest->rShutdown) (dest);

	if (queryDesc->totaltime)
		InstrStopNode(queryDesc->totaltime, estate->es_processed);

	MemoryContextSwitchTo(oldcontext);
}

/* ----------------------------------------------------------------
 *		ExecutorEnd
 *
 *		This routine must be called at the end of execution of any
 *		query plan
 *
 *		We provide a function hook variable that lets loadable plugins
 *		get control when ExecutorEnd is called.  Such a plugin would
 *		normally call standard_ExecutorEnd().
 *
 * ----------------------------------------------------------------
 */
void
ExecutorEnd(QueryDesc *queryDesc)
{
	if (ExecutorEnd_hook)
		(*ExecutorEnd_hook) (queryDesc);
	else
		standard_ExecutorEnd(queryDesc);
}

void
standard_ExecutorEnd(QueryDesc *queryDesc)
{
	EState	   *estate;
	MemoryContext oldcontext;

	/* sanity checks */
	Assert(queryDesc != NULL);

	estate = queryDesc->estate;

	Assert(estate != NULL);

	/*
	 * Switch into per-query memory context to run ExecEndPlan
	 */
	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	ExecEndPlan(queryDesc->planstate, estate);

	/*
	 * Close the SELECT INTO relation if any
	 */
	if (estate->es_select_into)
		CloseIntoRel(queryDesc);

	/* do away with our snapshots */
	UnregisterSnapshot(estate->es_snapshot);
	UnregisterSnapshot(estate->es_crosscheck_snapshot);

	/*
	 * Must switch out of context before destroying it
	 */
	MemoryContextSwitchTo(oldcontext);

	/*
	 * Release EState and per-query memory context.  This should release
	 * everything the executor has allocated.
	 */
	FreeExecutorState(estate);

	/* Reset queryDesc fields that no longer point to anything */
	queryDesc->tupDesc = NULL;
	queryDesc->estate = NULL;
	queryDesc->planstate = NULL;
	queryDesc->totaltime = NULL;
}
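
/*
 * Putting the three entry points together: a typical caller (sketch only;
 * see the real callers in tcop/pquery.c and friends) drives a query
 * roughly like this:
 *
 *		ExecutorStart(queryDesc, 0);
 *		ExecutorRun(queryDesc, ForwardScanDirection, 0L);
 *		ExecutorEnd(queryDesc);
 *
 * where count = 0L asks ExecutorRun to run the plan to completion.
 */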

/* ----------------------------------------------------------------
 *		ExecutorRewind
 *
 *		This routine may be called on an open queryDesc to rewind it
 *		to the start.
 * ----------------------------------------------------------------
 */
void
ExecutorRewind(QueryDesc *queryDesc)
{
	EState	   *estate;
	MemoryContext oldcontext;

	/* sanity checks */
	Assert(queryDesc != NULL);

	estate = queryDesc->estate;

	Assert(estate != NULL);

	/* It's probably not sensible to rescan updating queries */
	Assert(queryDesc->operation == CMD_SELECT);

	/*
	 * Switch into per-query memory context
	 */
	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	/*
	 * rescan plan
	 */
	ExecReScan(queryDesc->planstate);

	MemoryContextSwitchTo(oldcontext);
}


/*
 * ExecCheckRTPerms
 *		Check access permissions for all relations listed in a range table.
 *
 * Returns true if permissions are adequate.  Otherwise, throws an
 * appropriate error if ereport_on_violation is true, or simply returns
 * false.
 */
bool
ExecCheckRTPerms(List *rangeTable, bool ereport_on_violation)
{
	ListCell   *l;
	bool		result = true;

	foreach(l, rangeTable)
	{
		RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);

		result = ExecCheckRTEPerms(rte);
		if (!result)
		{
			Assert(rte->rtekind == RTE_RELATION);
			if (ereport_on_violation)
				aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
							   get_rel_name(rte->relid));
			return false;
		}
	}

	if (ExecutorCheckPerms_hook)
		result = (*ExecutorCheckPerms_hook)(rangeTable,
											ereport_on_violation);
	return result;
}
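
/*
 * A module-side sketch (hypothetical names, not part of this file) of an
 * ExecutorCheckPerms_hook implementation that vetoes access to one
 * particular relation.  Note the hook only runs after the built-in checks
 * above have already passed:
 *
 *		static bool
 *		my_ExecCheckPerms(List *rangeTable, bool ereport_on_violation)
 *		{
 *			ListCell   *l;
 *
 *			foreach(l, rangeTable)
 *			{
 *				RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
 *
 *				if (rte->rtekind == RTE_RELATION &&
 *					rte->relid == forbidden_relid)	(a hypothetical OID)
 *				{
 *					if (ereport_on_violation)
 *						aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
 *									   get_rel_name(rte->relid));
 *					return false;
 *				}
 *			}
 *			return true;
 *		}
 */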

/*
 * ExecCheckRTEPerms
 *		Check access permissions for a single RTE.
 */
static bool
ExecCheckRTEPerms(RangeTblEntry *rte)
{
	AclMode		requiredPerms;
	AclMode		relPerms;
	AclMode		remainingPerms;
	Oid			relOid;
	Oid			userid;
	Bitmapset  *tmpset;
	int			col;

	/*
	 * Only plain-relation RTEs need to be checked here.  Function RTEs are
	 * checked by init_fcache when the function is prepared for execution.
	 * Join, subquery, and special RTEs need no checks.
	 */
	if (rte->rtekind != RTE_RELATION)
		return true;

	/*
	 * No work if requiredPerms is empty.
	 */
	requiredPerms = rte->requiredPerms;
	if (requiredPerms == 0)
		return true;

	relOid = rte->relid;

	/*
	 * userid to check as: current user unless we have a setuid indication.
	 *
	 * Note: GetUserId() is presently fast enough that there's no harm in
	 * calling it separately for each RTE.  If that stops being true, we could
	 * call it once in ExecCheckRTPerms and pass the userid down from there.
	 * But for now, no need for the extra clutter.
	 */
	userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();

	/*
	 * We must have *all* the requiredPerms bits, but some of the bits can be
	 * satisfied from column-level rather than relation-level permissions.
	 * First, remove any bits that are satisfied by relation permissions.
	 */
	relPerms = pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL);
	remainingPerms = requiredPerms & ~relPerms;
	if (remainingPerms != 0)
	{
		/*
		 * If we lack any permissions that exist only as relation permissions,
		 * we can fail straight away.
		 */
		if (remainingPerms & ~(ACL_SELECT | ACL_INSERT | ACL_UPDATE))
			return false;

		/*
		 * Check to see if we have the needed privileges at column level.
		 *
		 * Note: failures just report a table-level error; it would be nicer
		 * to report a column-level error if we have some but not all of the
		 * column privileges.
		 */
		if (remainingPerms & ACL_SELECT)
		{
			/*
			 * When the query doesn't explicitly reference any columns (for
			 * example, SELECT COUNT(*) FROM table), allow the query if we
			 * have SELECT on any column of the rel, as per SQL spec.
			 */
			if (bms_is_empty(rte->selectedCols))
			{
				if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
											  ACLMASK_ANY) != ACLCHECK_OK)
					return false;
			}

			tmpset = bms_copy(rte->selectedCols);
			while ((col = bms_first_member(tmpset)) >= 0)
			{
				/* remove the column number offset */
				col += FirstLowInvalidHeapAttributeNumber;
				if (col == InvalidAttrNumber)
				{
					/* Whole-row reference, must have priv on all cols */
					if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
												  ACLMASK_ALL) != ACLCHECK_OK)
						return false;
				}
				else
				{
					if (pg_attribute_aclcheck(relOid, col, userid,
											  ACL_SELECT) != ACLCHECK_OK)
						return false;
				}
			}
			bms_free(tmpset);
		}

		/*
		 * Basically the same for the mod columns, with either INSERT or
		 * UPDATE privilege as specified by remainingPerms.
		 */
		remainingPerms &= ~ACL_SELECT;
		if (remainingPerms != 0)
		{
			/*
			 * When the query doesn't explicitly change any columns, allow the
			 * query if we have permission on any column of the rel.  This is
			 * to handle SELECT FOR UPDATE as well as possible corner cases in
			 * INSERT and UPDATE.
			 */
			if (bms_is_empty(rte->modifiedCols))
			{
				if (pg_attribute_aclcheck_all(relOid, userid, remainingPerms,
											  ACLMASK_ANY) != ACLCHECK_OK)
					return false;
			}

			tmpset = bms_copy(rte->modifiedCols);
			while ((col = bms_first_member(tmpset)) >= 0)
			{
				/* remove the column number offset */
				col += FirstLowInvalidHeapAttributeNumber;
				if (col == InvalidAttrNumber)
				{
					/* whole-row reference can't happen here */
					elog(ERROR, "whole-row update is not implemented");
				}
				else
				{
					if (pg_attribute_aclcheck(relOid, col, userid,
											  remainingPerms) != ACLCHECK_OK)
						return false;
				}
			}
			bms_free(tmpset);
		}
	}
	return true;
}
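
/*
 * Illustration of the column-level rules above (hypothetical objects): if a
 * user holds only "GRANT SELECT (a) ON t", then "SELECT a FROM t" passes,
 * "SELECT b FROM t" and "SELECT * FROM t" fail, and "SELECT count(*) FROM t"
 * passes because SELECT is held on at least one column of the rel.
 */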

/*
 * Check that the query does not imply any writes to non-temp tables.
 *
 * Note: in a Hot Standby slave this would need to reject writes to temp
 * tables as well; but an HS slave can't have created any temp tables
 * in the first place, so no need to check that.
 */
static void
ExecCheckXactReadOnly(PlannedStmt *plannedstmt)
{
	ListCell   *l;

	/*
	 * CREATE TABLE AS or SELECT INTO?
	 *
	 * XXX should we allow this if the destination is temp?  Considering that
	 * it would still require catalog changes, probably not.
	 */
	if (plannedstmt->intoClause != NULL)
		PreventCommandIfReadOnly(CreateCommandTag((Node *) plannedstmt));

	/* Fail if write permissions are requested on any non-temp table */
	foreach(l, plannedstmt->rtable)
	{
		RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);

		if (rte->rtekind != RTE_RELATION)
			continue;

		if ((rte->requiredPerms & (~ACL_SELECT)) == 0)
			continue;

		if (isTempNamespace(get_rel_namespace(rte->relid)))
			continue;

		PreventCommandIfReadOnly(CreateCommandTag((Node *) plannedstmt));
	}
}
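
/*
 * For example (hypothetical session): after SET TRANSACTION READ ONLY, an
 * UPDATE on an ordinary table is rejected up front here ("ERROR:  cannot
 * execute UPDATE in a read-only transaction"), while the same UPDATE on a
 * temporary table is allowed to proceed.
 */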


/* ----------------------------------------------------------------
 *		InitPlan
 *
 *		Initializes the query plan: open files, allocate storage
 *		and start up the rule manager
 * ----------------------------------------------------------------
 */
static void
InitPlan(QueryDesc *queryDesc, int eflags)
{
	CmdType		operation = queryDesc->operation;
	PlannedStmt *plannedstmt = queryDesc->plannedstmt;
	Plan	   *plan = plannedstmt->planTree;
	List	   *rangeTable = plannedstmt->rtable;
	EState	   *estate = queryDesc->estate;
	PlanState  *planstate;
	TupleDesc	tupType;
	ListCell   *l;
	int			i;

	/*
	 * Do permissions checks
	 */
	ExecCheckRTPerms(rangeTable, true);

	/*
	 * initialize the node's execution state
	 */
	estate->es_range_table = rangeTable;
	estate->es_plannedstmt = plannedstmt;

	/*
	 * initialize result relation stuff, and open/lock the result rels.
	 *
	 * We must do this before initializing the plan tree, else we might try to
	 * do a lock upgrade if a result rel is also a source rel.
	 */
	if (plannedstmt->resultRelations)
	{
		List	   *resultRelations = plannedstmt->resultRelations;
		int			numResultRelations = list_length(resultRelations);
		ResultRelInfo *resultRelInfos;
		ResultRelInfo *resultRelInfo;

		resultRelInfos = (ResultRelInfo *)
			palloc(numResultRelations * sizeof(ResultRelInfo));
		resultRelInfo = resultRelInfos;
		foreach(l, resultRelations)
		{
			Index		resultRelationIndex = lfirst_int(l);
			Oid			resultRelationOid;
			Relation	resultRelation;

			resultRelationOid = getrelid(resultRelationIndex, rangeTable);
			resultRelation = heap_open(resultRelationOid, RowExclusiveLock);
			InitResultRelInfo(resultRelInfo,
							  resultRelation,
							  resultRelationIndex,
							  operation,
							  estate->es_instrument);
			resultRelInfo++;
		}
		estate->es_result_relations = resultRelInfos;
		estate->es_num_result_relations = numResultRelations;
		/* es_result_relation_info is NULL except when within ModifyTable */
		estate->es_result_relation_info = NULL;
	}
	else
	{
		/*
		 * if no result relation, then set state appropriately
		 */
		estate->es_result_relations = NULL;
		estate->es_num_result_relations = 0;
		estate->es_result_relation_info = NULL;
	}

	/*
	 * Similarly, we have to lock relations selected FOR UPDATE/FOR SHARE
	 * before we initialize the plan tree, else we'd be risking lock upgrades.
	 * While we are at it, build the ExecRowMark list.
	 */
	estate->es_rowMarks = NIL;
	foreach(l, plannedstmt->rowMarks)
	{
		PlanRowMark *rc = (PlanRowMark *) lfirst(l);
		Oid			relid;
		Relation	relation;
		ExecRowMark *erm;

		/* ignore "parent" rowmarks; they are irrelevant at runtime */
		if (rc->isParent)
			continue;

		switch (rc->markType)
		{
			case ROW_MARK_EXCLUSIVE:
			case ROW_MARK_SHARE:
				relid = getrelid(rc->rti, rangeTable);
				relation = heap_open(relid, RowShareLock);
				break;
			case ROW_MARK_REFERENCE:
				relid = getrelid(rc->rti, rangeTable);
				relation = heap_open(relid, AccessShareLock);
				break;
			case ROW_MARK_COPY:
				/* there's no real table here ... */
				relation = NULL;
				break;
			default:
				elog(ERROR, "unrecognized markType: %d", rc->markType);
				relation = NULL;	/* keep compiler quiet */
				break;
		}

		erm = (ExecRowMark *) palloc(sizeof(ExecRowMark));
		erm->relation = relation;
		erm->rti = rc->rti;
		erm->prti = rc->prti;
		erm->markType = rc->markType;
		erm->noWait = rc->noWait;
		erm->ctidAttNo = rc->ctidAttNo;
		erm->toidAttNo = rc->toidAttNo;
		erm->wholeAttNo = rc->wholeAttNo;
		ItemPointerSetInvalid(&(erm->curCtid));
		estate->es_rowMarks = lappend(estate->es_rowMarks, erm);
	}

	/*
	 * Detect whether we're doing SELECT INTO.  If so, set the es_into_oids
	 * flag appropriately so that the plan tree will be initialized with the
	 * correct tuple descriptors.  (Other SELECT INTO stuff comes later.)
	 */
	estate->es_select_into = false;
	if (operation == CMD_SELECT && plannedstmt->intoClause != NULL)
	{
		estate->es_select_into = true;
		estate->es_into_oids = interpretOidsOption(plannedstmt->intoClause->options);
	}

	/*
	 * Initialize the executor's tuple table to empty.
	 */
	estate->es_tupleTable = NIL;
	estate->es_trig_tuple_slot = NULL;
	estate->es_trig_oldtup_slot = NULL;

	/* mark EvalPlanQual not active */
	estate->es_epqTuple = NULL;
	estate->es_epqTupleSet = NULL;
	estate->es_epqScanDone = NULL;

	/*
	 * Initialize private state information for each SubPlan.  We must do this
	 * before running ExecInitNode on the main query tree, since
	 * ExecInitSubPlan expects to be able to find these entries.
	 */
	Assert(estate->es_subplanstates == NIL);
	i = 1;						/* subplan indices count from 1 */
	foreach(l, plannedstmt->subplans)
	{
		Plan	   *subplan = (Plan *) lfirst(l);
		PlanState  *subplanstate;
		int			sp_eflags;

		/*
		 * A subplan will never need to do BACKWARD scan nor MARK/RESTORE. If
		 * it is a parameterless subplan (not initplan), we suggest that it be
		 * prepared to handle REWIND efficiently; otherwise there is no need.
		 */
		sp_eflags = eflags & EXEC_FLAG_EXPLAIN_ONLY;
		if (bms_is_member(i, plannedstmt->rewindPlanIDs))
			sp_eflags |= EXEC_FLAG_REWIND;

		subplanstate = ExecInitNode(subplan, estate, sp_eflags);

		estate->es_subplanstates = lappend(estate->es_subplanstates,
										   subplanstate);

		i++;
	}

	/*
	 * Initialize the private state information for all the nodes in the query
	 * tree.  This opens files, allocates storage and leaves us ready to start
	 * processing tuples.
	 */
	planstate = ExecInitNode(plan, estate, eflags);

	/*
	 * Get the tuple descriptor describing the type of tuples to return. (this
	 * is especially important if we are creating a relation with "SELECT
	 * INTO")
	 */
	tupType = ExecGetResultType(planstate);

	/*
	 * Initialize the junk filter if needed.  SELECT queries need a filter if
	 * there are any junk attrs in the top-level tlist.
	 */
	if (operation == CMD_SELECT)
	{
		bool		junk_filter_needed = false;
		ListCell   *tlist;

		foreach(tlist, plan->targetlist)
		{
			TargetEntry *tle = (TargetEntry *) lfirst(tlist);

			if (tle->resjunk)
			{
				junk_filter_needed = true;
				break;
			}
		}

		if (junk_filter_needed)
		{
			JunkFilter *j;

			j = ExecInitJunkFilter(planstate->plan->targetlist,
								   tupType->tdhasoid,
								   ExecInitExtraTupleSlot(estate));
			estate->es_junkFilter = j;

			/* Want to return the cleaned tuple type */
			tupType = j->jf_cleanTupType;
		}
	}

	queryDesc->tupDesc = tupType;
	queryDesc->planstate = planstate;

	/*
	 * If doing SELECT INTO, initialize the "into" relation.  We must wait
	 * till now so we have the "clean" result tuple type to create the new
	 * table from.
	 *
	 * If EXPLAIN, skip creating the "into" relation.
	 */
	if (estate->es_select_into && !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
		OpenIntoRel(queryDesc);
}

/*
 * Initialize ResultRelInfo data for one result relation
 */
void
InitResultRelInfo(ResultRelInfo *resultRelInfo,
				  Relation resultRelationDesc,
				  Index resultRelationIndex,
				  CmdType operation,
				  int instrument_options)
{
	/*
	 * Check valid relkind ... parser and/or planner should have noticed this
	 * already, but let's make sure.
	 */
	switch (resultRelationDesc->rd_rel->relkind)
	{
		case RELKIND_RELATION:
			/* OK */
			break;
		case RELKIND_SEQUENCE:
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot change sequence \"%s\"",
							RelationGetRelationName(resultRelationDesc))));
			break;
		case RELKIND_TOASTVALUE:
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot change TOAST relation \"%s\"",
							RelationGetRelationName(resultRelationDesc))));
			break;
		case RELKIND_VIEW:
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot change view \"%s\"",
							RelationGetRelationName(resultRelationDesc))));
			break;
		default:
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot change relation \"%s\"",
							RelationGetRelationName(resultRelationDesc))));
			break;
	}

	/* OK, fill in the node */
	MemSet(resultRelInfo, 0, sizeof(ResultRelInfo));
	resultRelInfo->type = T_ResultRelInfo;
	resultRelInfo->ri_RangeTableIndex = resultRelationIndex;
	resultRelInfo->ri_RelationDesc = resultRelationDesc;
	resultRelInfo->ri_NumIndices = 0;
	resultRelInfo->ri_IndexRelationDescs = NULL;
	resultRelInfo->ri_IndexRelationInfo = NULL;
	/* make a copy so as not to depend on relcache info not changing... */
	resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc);
	if (resultRelInfo->ri_TrigDesc)
	{
		int			n = resultRelInfo->ri_TrigDesc->numtriggers;

		resultRelInfo->ri_TrigFunctions = (FmgrInfo *)
			palloc0(n * sizeof(FmgrInfo));
		resultRelInfo->ri_TrigWhenExprs = (List **)
			palloc0(n * sizeof(List *));
		if (instrument_options)
			resultRelInfo->ri_TrigInstrument = InstrAlloc(n, instrument_options);
	}
	else
	{
		resultRelInfo->ri_TrigFunctions = NULL;
		resultRelInfo->ri_TrigWhenExprs = NULL;
		resultRelInfo->ri_TrigInstrument = NULL;
	}
	resultRelInfo->ri_ConstraintExprs = NULL;
	resultRelInfo->ri_junkFilter = NULL;
	resultRelInfo->ri_projectReturning = NULL;

	/*
	 * If there are indices on the result relation, open them and save
	 * descriptors in the result relation info, so that we can add new index
	 * entries for the tuples we add/update.  We need not do this for a
	 * DELETE, however, since deletion doesn't affect indexes.
	 */
	if (resultRelationDesc->rd_rel->relhasindex &&
		operation != CMD_DELETE)
		ExecOpenIndices(resultRelInfo);
}

/*
 *		ExecGetTriggerResultRel
 *
 * Get a ResultRelInfo for a trigger target relation.  Most of the time,
 * triggers are fired on one of the result relations of the query, and so
 * we can just return a member of the es_result_relations array.  (Note: in
 * self-join situations there might be multiple members with the same OID;
 * if so it doesn't matter which one we pick.)  However, it is sometimes
 * necessary to fire triggers on other relations; this happens mainly when an
 * RI update trigger queues additional triggers on other relations, which will
 * be processed in the context of the outer query.  For efficiency's sake,
 * we want to have a ResultRelInfo for those triggers too; that can avoid
 * repeated re-opening of the relation.  (It also provides a way for EXPLAIN
 * ANALYZE to report the runtimes of such triggers.)  So we make additional
 * ResultRelInfo's as needed, and save them in es_trig_target_relations.
 */
ResultRelInfo *
ExecGetTriggerResultRel(EState *estate, Oid relid)
{
	ResultRelInfo *rInfo;
	int			nr;
	ListCell   *l;
	Relation	rel;
	MemoryContext oldcontext;

	/* First, search through the query result relations */
	rInfo = estate->es_result_relations;
	nr = estate->es_num_result_relations;
	while (nr > 0)
	{
		if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
			return rInfo;
		rInfo++;
		nr--;
	}
	/* Nope, but maybe we already made an extra ResultRelInfo for it */
	foreach(l, estate->es_trig_target_relations)
	{
		rInfo = (ResultRelInfo *) lfirst(l);
		if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
			return rInfo;
	}
	/* Nope, so we need a new one */

	/*
	 * Open the target relation's relcache entry.  We assume that an
	 * appropriate lock is still held by the backend from whenever the trigger
	 * event got queued, so we need take no new lock here.
	 */
	rel = heap_open(relid, NoLock);

	/*
	 * Make the new entry in the right context.  Currently, we don't need any
	 * index information in ResultRelInfos used only for triggers, so tell
	 * InitResultRelInfo it's a DELETE.
	 */
	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
	rInfo = makeNode(ResultRelInfo);
	InitResultRelInfo(rInfo,
					  rel,
					  0,		/* dummy rangetable index */
					  CMD_DELETE,
					  estate->es_instrument);
	estate->es_trig_target_relations =
		lappend(estate->es_trig_target_relations, rInfo);
	MemoryContextSwitchTo(oldcontext);

	return rInfo;
}

/*
 *		ExecContextForcesOids
 *
 * This is pretty grotty: when doing INSERT, UPDATE, or SELECT INTO,
 * we need to ensure that result tuples have space for an OID iff they are
 * going to be stored into a relation that has OIDs.  In other contexts
 * we are free to choose whether to leave space for OIDs in result tuples
 * (we generally don't want to, but we do if a physical-tlist optimization
 * is possible).  This routine checks the plan context and returns TRUE if the
 * choice is forced, FALSE if the choice is not forced.  In the TRUE case,
 * *hasoids is set to the required value.
 *
 * One reason this is ugly is that all plan nodes in the plan tree will emit
 * tuples with space for an OID, though we really only need the topmost node
 * to do so.  However, node types like Sort don't project new tuples but just
 * return their inputs, and in those cases the requirement propagates down
 * to the input node.  Eventually we might make this code smart enough to
 * recognize how far down the requirement really goes, but for now we just
 * make all plan nodes do the same thing if the top level forces the choice.
 *
 * We assume that if we are generating tuples for INSERT or UPDATE,
 * estate->es_result_relation_info is already set up to describe the target
 * relation.  Note that in an UPDATE that spans an inheritance tree, some of
 * the target relations may have OIDs and some not.  We have to make the
 * decisions on a per-relation basis as we initialize each of the subplans of
 * the ModifyTable node, so ModifyTable has to set es_result_relation_info
 * while initializing each subplan.
 *
 * SELECT INTO is even uglier, because we don't have the INTO relation's
 * descriptor available when this code runs; we have to look aside at a
 * flag set by InitPlan().
 */
bool
ExecContextForcesOids(PlanState *planstate, bool *hasoids)
{
	ResultRelInfo *ri = planstate->state->es_result_relation_info;

	if (ri != NULL)
	{
		Relation	rel = ri->ri_RelationDesc;

		if (rel != NULL)
		{
			*hasoids = rel->rd_rel->relhasoids;
			return true;
		}
	}

	if (planstate->state->es_select_into)
	{
		*hasoids = planstate->state->es_into_oids;
		return true;
	}

	return false;
}

/* ----------------------------------------------------------------
 *		ExecEndPlan
 *
 *		Cleans up the query plan -- closes files and frees up storage
 *
 * NOTE: we are no longer very worried about freeing storage per se
 * in this code; FreeExecutorState should be guaranteed to release all
 * memory that needs to be released.  What we are worried about doing
 * is closing relations and dropping buffer pins.  Thus, for example,
 * tuple tables must be cleared or dropped to ensure pins are released.
 * ----------------------------------------------------------------
 */
static void
ExecEndPlan(PlanState *planstate, EState *estate)
{
	ResultRelInfo *resultRelInfo;
	int			i;
	ListCell   *l;

	/*
	 * shut down the node-type-specific query processing
	 */
	ExecEndNode(planstate);

	/*
	 * for subplans too
	 */
	foreach(l, estate->es_subplanstates)
	{
		PlanState  *subplanstate = (PlanState *) lfirst(l);

		ExecEndNode(subplanstate);
	}

	/*
	 * destroy the executor's tuple table.  Actually we only care about
	 * releasing buffer pins and tupdesc refcounts; there's no need to pfree
	 * the TupleTableSlots, since the containing memory context is about to go
	 * away anyway.
	 */
	ExecResetTupleTable(estate->es_tupleTable, false);

	/*
	 * close the result relation(s) if any, but hold locks until xact commit.
	 */
	resultRelInfo = estate->es_result_relations;
	for (i = estate->es_num_result_relations; i > 0; i--)
	{
		/* Close indices and then the relation itself */
		ExecCloseIndices(resultRelInfo);
		heap_close(resultRelInfo->ri_RelationDesc, NoLock);
		resultRelInfo++;
	}

	/*
	 * likewise close any trigger target relations
	 */
	foreach(l, estate->es_trig_target_relations)
	{
		resultRelInfo = (ResultRelInfo *) lfirst(l);
		/* Close indices and then the relation itself */
		ExecCloseIndices(resultRelInfo);
		heap_close(resultRelInfo->ri_RelationDesc, NoLock);
	}

	/*
	 * close any relations selected FOR UPDATE/FOR SHARE, again keeping locks
	 */
	foreach(l, estate->es_rowMarks)
	{
		ExecRowMark *erm = (ExecRowMark *) lfirst(l);

		if (erm->relation)
			heap_close(erm->relation, NoLock);
	}
}

/* ----------------------------------------------------------------
 *		ExecutePlan
 *
 *		Processes the query plan until we have processed 'numberTuples' tuples,
 *		moving in the specified direction.
 *
 *		Runs to completion if numberTuples is 0
 *
 * Note: the ctid attribute is a 'junk' attribute that is removed before the
 * user can see it
 * ----------------------------------------------------------------
 */
static void
ExecutePlan(EState *estate,
			PlanState *planstate,
			CmdType operation,
			bool sendTuples,
			long numberTuples,
			ScanDirection direction,
			DestReceiver *dest)
{
	TupleTableSlot *slot;
	long		current_tuple_count;

	/*
	 * initialize local variables
	 */
	current_tuple_count = 0;

	/*
	 * Set the direction.
	 */
	estate->es_direction = direction;

	/*
	 * Loop until we've processed the proper number of tuples from the plan.
	 */
	for (;;)
	{
		/* Reset the per-output-tuple exprcontext */
		ResetPerTupleExprContext(estate);

		/*
		 * Execute the plan and obtain a tuple
		 */
		slot = ExecProcNode(planstate);

		/*
		 * if the tuple is null, then we assume there is nothing more to
		 * process so we just end the loop...
		 */
		if (TupIsNull(slot))
			break;

		/*
		 * If we have a junk filter, then project a new tuple with the junk
		 * removed.
		 *
		 * Store this new "clean" tuple in the junkfilter's resultSlot.
		 * (Formerly, we stored it back over the "dirty" tuple, which is WRONG
		 * because that tuple slot has the wrong descriptor.)
		 */
		if (estate->es_junkFilter != NULL)
			slot = ExecFilterJunk(estate->es_junkFilter, slot);

		/*
		 * If we are supposed to send the tuple somewhere, do so. (In
		 * practice, this is probably always the case at this point.)
		 */
		if (sendTuples)
			(*dest->receiveSlot) (slot, dest);

		/*
		 * Count tuples processed, if this is a SELECT.  (For other operation
		 * types, the ModifyTable plan node must count the appropriate
		 * events.)
		 */
		if (operation == CMD_SELECT)
			(estate->es_processed)++;

		/*
		 * Check our tuple count; if we've processed the proper number then
		 * quit, else loop again and process more tuples.  Zero numberTuples
		 * means no limit.
		 */
		current_tuple_count++;
		if (numberTuples && numberTuples == current_tuple_count)
			break;
	}
}


/*
 * ExecRelCheck --- check that tuple meets constraints for result relation
 */
static const char *
ExecRelCheck(ResultRelInfo *resultRelInfo,
			 TupleTableSlot *slot, EState *estate)
{
	Relation	rel = resultRelInfo->ri_RelationDesc;
	int			ncheck = rel->rd_att->constr->num_check;
	ConstrCheck *check = rel->rd_att->constr->check;
	ExprContext *econtext;
	MemoryContext oldContext;
	List	   *qual;
	int			i;

	/*
	 * If first time through for this result relation, build expression
	 * nodetrees for rel's constraint expressions.  Keep them in the per-query
	 * memory context so they'll survive throughout the query.
	 */
	if (resultRelInfo->ri_ConstraintExprs == NULL)
	{
		oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
		resultRelInfo->ri_ConstraintExprs =
			(List **) palloc(ncheck * sizeof(List *));
		for (i = 0; i < ncheck; i++)
		{
			/* ExecQual wants implicit-AND form */
			qual = make_ands_implicit(stringToNode(check[i].ccbin));
			resultRelInfo->ri_ConstraintExprs[i] = (List *)
				ExecPrepareExpr((Expr *) qual, estate);
		}
		MemoryContextSwitchTo(oldContext);
	}

	/*
	 * We will use the EState's per-tuple context for evaluating constraint
	 * expressions (creating it if it's not already there).
	 */
	econtext = GetPerTupleExprContext(estate);

	/* Arrange for econtext's scan tuple to be the tuple under test */
	econtext->ecxt_scantuple = slot;

	/* And evaluate the constraints */
	for (i = 0; i < ncheck; i++)
	{
		qual = resultRelInfo->ri_ConstraintExprs[i];

		/*
		 * NOTE: SQL92 specifies that a NULL result from a constraint
		 * expression is not to be treated as a failure.  Therefore, tell
		 * ExecQual to return TRUE for NULL.
		 */
		if (!ExecQual(qual, econtext, true))
			return check[i].ccname;
	}

	/* NULL result means no error */
	return NULL;
}

void
ExecConstraints(ResultRelInfo *resultRelInfo,
				TupleTableSlot *slot, EState *estate)
{
	Relation	rel = resultRelInfo->ri_RelationDesc;
	TupleConstr *constr = rel->rd_att->constr;

	Assert(constr);

	if (constr->has_not_null)
	{
		int			natts = rel->rd_att->natts;
		int			attrChk;

		for (attrChk = 1; attrChk <= natts; attrChk++)
		{
			if (rel->rd_att->attrs[attrChk - 1]->attnotnull &&
				slot_attisnull(slot, attrChk))
				ereport(ERROR,
						(errcode(ERRCODE_NOT_NULL_VIOLATION),
						 errmsg("null value in column \"%s\" violates not-null constraint",
								NameStr(rel->rd_att->attrs[attrChk - 1]->attname))));
		}
	}

	if (constr->num_check > 0)
	{
		const char *failed;

		if ((failed = ExecRelCheck(resultRelInfo, slot, estate)) != NULL)
			ereport(ERROR,
					(errcode(ERRCODE_CHECK_VIOLATION),
					 errmsg("new row for relation \"%s\" violates check constraint \"%s\"",
							RelationGetRelationName(rel), failed)));
	}
}
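
/*
 * Editorial sketch (not in the original file): callers such as ExecInsert
 * and ExecUpdate in nodeModifyTable.c invoke this just before writing the
 * tuple, guarding on the relation actually having constraints:
 *
 *		if (resultRelationDesc->rd_att->constr)
 *			ExecConstraints(resultRelInfo, slot, estate);
 *
 * ExecConstraints() ereport()s on violation, so control returns only when
 * the tuple satisfies every NOT NULL and CHECK constraint.
 */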


/*
 * EvalPlanQual logic --- recheck modified tuple(s) to see if we want to
 * process the updated version under READ COMMITTED rules.
 *
 * See backend/executor/README for some info about how this works.
 */


/*
 * Check a modified tuple to see if we want to process its updated version
 * under READ COMMITTED rules.
 *
 *	estate - outer executor state data
 *	epqstate - state for EvalPlanQual rechecking
 *	relation - table containing tuple
 *	rti - rangetable index of table containing tuple
 *	*tid - t_ctid from the outdated tuple (ie, next updated version)
 *	priorXmax - t_xmax from the outdated tuple
 *
 * *tid is also an output parameter: it's modified to hold the TID of the
 * latest version of the tuple (note this may be changed even on failure)
 *
 * Returns a slot containing the new candidate update/delete tuple, or
 * NULL if we determine we shouldn't process the row.
 */
TupleTableSlot *
EvalPlanQual(EState *estate, EPQState *epqstate,
			 Relation relation, Index rti,
			 ItemPointer tid, TransactionId priorXmax)
{
	TupleTableSlot *slot;
	HeapTuple	copyTuple;

	Assert(rti > 0);

	/*
	 * Get and lock the updated version of the row; if that fails, return
	 * NULL.
	 */
	copyTuple = EvalPlanQualFetch(estate, relation, LockTupleExclusive,
								  tid, priorXmax);

	if (copyTuple == NULL)
		return NULL;

	/*
	 * For UPDATE/DELETE we have to return the tid of the actual row we're
	 * executing EPQ for.
	 */
	*tid = copyTuple->t_self;

	/*
	 * Need to run a recheck subquery.  Initialize or reinitialize EPQ state.
	 */
	EvalPlanQualBegin(epqstate, estate);

	/*
	 * Free old test tuple, if any, and store new tuple where relation's scan
	 * node will see it
	 */
	EvalPlanQualSetTuple(epqstate, rti, copyTuple);

	/*
	 * Fetch any non-locked source rows
	 */
	EvalPlanQualFetchRowMarks(epqstate);

	/*
	 * Run the EPQ query.  We assume it will return at most one tuple.
	 */
	slot = EvalPlanQualNext(epqstate);

	/*
	 * If we got a tuple, force the slot to materialize the tuple so that it
	 * is not dependent on any local state in the EPQ query (in particular,
	 * the slot is likely to contain references to pass-by-ref datums in
	 * copyTuple).  As with the next step, this is to guard against early
	 * re-use of the EPQ query.
	 */
	if (!TupIsNull(slot))
		(void) ExecMaterializeSlot(slot);

	/*
	 * Clear out the test tuple.  This is needed in case the EPQ query is
	 * re-used to test a tuple for a different relation.  (It's not clear
	 * that this can really happen, but let's be safe.)
	 */
	EvalPlanQualSetTuple(epqstate, rti, NULL);

	return slot;
}
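
/*
 * Editorial sketch (not in the original file): under READ COMMITTED,
 * ExecUpdate/ExecDelete in nodeModifyTable.c call this when
 * heap_update/heap_delete reports HeapTupleUpdated, roughly:
 *
 *		slot = EvalPlanQual(estate, epqstate, resultRelationDesc,
 *							resultRelInfo->ri_RangeTableIndex,
 *							&update_ctid, update_xmax);
 *		if (!TupIsNull(slot))
 *			...retry the operation against the re-evaluated tuple...
 *
 * A NULL slot means the concurrently-modified row no longer satisfies the
 * query's quals, so the operation silently skips it.
 */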

/*
 * Fetch a copy of the newest version of an outdated tuple
 *
 *	estate - executor state data
 *	relation - table containing tuple
 *	lockmode - requested tuple lock mode
 *	*tid - t_ctid from the outdated tuple (ie, next updated version)
 *	priorXmax - t_xmax from the outdated tuple
 *
 * Returns a palloc'd copy of the newest tuple version, or NULL if we find
 * that there is no newest version (ie, the row was deleted, not updated).
 * If successful, we have locked the newest tuple version, so the caller
 * does not need to worry about it changing anymore.
 *
 * Note: properly, lockmode should be declared as enum LockTupleMode,
 * but we use "int" to avoid having to include heapam.h in executor.h.
 */
HeapTuple
EvalPlanQualFetch(EState *estate, Relation relation, int lockmode,
				  ItemPointer tid, TransactionId priorXmax)
{
	HeapTuple	copyTuple = NULL;
	HeapTupleData tuple;
	SnapshotData SnapshotDirty;

	/*
	 * fetch target tuple
	 *
	 * Loop here to deal with updated or busy tuples
	 */
	InitDirtySnapshot(SnapshotDirty);
	tuple.t_self = *tid;
	for (;;)
	{
		Buffer		buffer;

		if (heap_fetch(relation, &SnapshotDirty, &tuple, &buffer, true, NULL))
		{
			HTSU_Result test;
			ItemPointerData update_ctid;
			TransactionId update_xmax;

			/*
			 * If xmin isn't what we're expecting, the slot must have been
			 * recycled and reused for an unrelated tuple.  This implies that
			 * the latest version of the row was deleted, so we need do
			 * nothing.  (Should be safe to examine xmin without getting
			 * buffer's content lock, since xmin never changes in an existing
			 * tuple.)
			 */
			if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
									 priorXmax))
			{
				ReleaseBuffer(buffer);
				return NULL;
			}

			/* otherwise xmin should not be dirty... */
			if (TransactionIdIsValid(SnapshotDirty.xmin))
				elog(ERROR, "t_xmin is uncommitted in tuple to be updated");

			/*
			 * If the tuple is being updated by another transaction, we have
			 * to wait for its commit/abort.
			 */
			if (TransactionIdIsValid(SnapshotDirty.xmax))
			{
				ReleaseBuffer(buffer);
				XactLockTableWait(SnapshotDirty.xmax);
				continue;		/* loop back to repeat heap_fetch */
			}

			/*
			 * If tuple was inserted by our own transaction, we have to check
			 * cmin against es_output_cid: cmin >= current CID means our
			 * command cannot see the tuple, so we should ignore it.  Without
			 * this we are open to the "Halloween problem" of indefinitely
			 * re-updating the same tuple.  (We need not check cmax because
			 * HeapTupleSatisfiesDirty will consider a tuple deleted by our
			 * transaction dead, regardless of cmax.)  We just checked that
			 * priorXmax == xmin, so we can test that variable instead of
			 * doing HeapTupleHeaderGetXmin again.
			 */
			if (TransactionIdIsCurrentTransactionId(priorXmax) &&
				HeapTupleHeaderGetCmin(tuple.t_data) >= estate->es_output_cid)
			{
				ReleaseBuffer(buffer);
				return NULL;
			}

			/*
			 * This is a live tuple, so now try to lock it.
			 */
			test = heap_lock_tuple(relation, &tuple, &buffer,
								   &update_ctid, &update_xmax,
								   estate->es_output_cid,
								   lockmode, false);
			/* We now have two pins on the buffer, get rid of one */
			ReleaseBuffer(buffer);

			switch (test)
			{
				case HeapTupleSelfUpdated:
					/* treat it as deleted; do not process */
					ReleaseBuffer(buffer);
					return NULL;

				case HeapTupleMayBeUpdated:
					/* successfully locked */
					break;

				case HeapTupleUpdated:
					ReleaseBuffer(buffer);
					if (IsXactIsoLevelSerializable)
						ereport(ERROR,
								(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
								 errmsg("could not serialize access due to concurrent update")));
					if (!ItemPointerEquals(&update_ctid, &tuple.t_self))
					{
						/* it was updated, so look at the updated version */
						tuple.t_self = update_ctid;
						/* updated row should have xmin matching this xmax */
						priorXmax = update_xmax;
						continue;
					}
					/* tuple was deleted, so give up */
					return NULL;

				default:
					ReleaseBuffer(buffer);
					elog(ERROR, "unrecognized heap_lock_tuple status: %u",
						 test);
					return NULL;	/* keep compiler quiet */
			}

			/*
			 * We got the tuple; now copy it for use by the recheck query.
			 */
			copyTuple = heap_copytuple(&tuple);
			ReleaseBuffer(buffer);
			break;
		}

		/*
		 * If the referenced slot was actually empty, the latest version of
		 * the row must have been deleted, so we need do nothing.
		 */
		if (tuple.t_data == NULL)
		{
			ReleaseBuffer(buffer);
			return NULL;
		}

		/*
		 * As above, if xmin isn't what we're expecting, do nothing.
		 */
		if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
								 priorXmax))
		{
			ReleaseBuffer(buffer);
			return NULL;
		}

		/*
		 * If we get here, the tuple was found but failed SnapshotDirty.
		 * Assuming the xmin is either a committed xact or our own xact (as it
		 * certainly should be if we're trying to modify the tuple), this must
		 * mean that the row was updated or deleted by either a committed xact
		 * or our own xact.  If it was deleted, we can ignore it; if it was
		 * updated then chain up to the next version and repeat the whole
		 * process.
		 *
		 * As above, it should be safe to examine xmax and t_ctid without the
		 * buffer content lock, because they can't be changing.
		 */
		if (ItemPointerEquals(&tuple.t_self, &tuple.t_data->t_ctid))
		{
			/* deleted, so forget about it */
			ReleaseBuffer(buffer);
			return NULL;
		}

		/* updated, so look at the updated row */
		tuple.t_self = tuple.t_data->t_ctid;
		/* updated row should have xmin matching this xmax */
		priorXmax = HeapTupleHeaderGetXmax(tuple.t_data);
		ReleaseBuffer(buffer);
		/* loop back to fetch next in chain */
	}

	/*
	 * Return the copied tuple
	 */
	return copyTuple;
}
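
/*
 * Editorial sketch (not in the original file): the loop above walks the
 * update chain by following t_ctid links, maintaining the invariant that
 * each next version's xmin must equal the previous version's xmax:
 *
 *		v1 (xmax = X, t_ctid -> v2)  =>  v2 must have xmin = X
 *
 * If that invariant ever fails, the line pointer was recycled for an
 * unrelated tuple, so the chain is treated as dead and NULL is returned.
 */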

/*
 * EvalPlanQualInit -- initialize during creation of a plan state node
 * that might need to invoke EPQ processing.
 * Note: subplan can be NULL if it will be set later with EvalPlanQualSetPlan.
 */
void
EvalPlanQualInit(EPQState *epqstate, EState *estate,
				 Plan *subplan, int epqParam)
{
	/* Mark the EPQ state inactive */
	epqstate->estate = NULL;
	epqstate->planstate = NULL;
	epqstate->origslot = NULL;
	/* ... and remember data that EvalPlanQualBegin will need */
	epqstate->plan = subplan;
	epqstate->rowMarks = NIL;
	epqstate->epqParam = epqParam;
}
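
/*
 * Editorial sketch (not in the original file): plan nodes that own an
 * EPQState (ModifyTable, LockRows) initialize it at ExecInit time, along
 * the lines of:
 *
 *		EvalPlanQualInit(&mtstate->mt_epqstate, estate,
 *						 NULL, node->epqParam);
 *
 * passing NULL for the subplan when it will be supplied later via
 * EvalPlanQualSetPlan (as ModifyTable does for its multiple subplans).
 * The field name mt_epqstate here is illustrative of the caller's side.
 */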

/*
 * EvalPlanQualSetPlan -- set or change subplan of an EPQState.
 *
 * We need this so that ModifyTable can deal with multiple subplans.
 */
void
EvalPlanQualSetPlan(EPQState *epqstate, Plan *subplan)
{
	/* If we have a live EPQ query, shut it down */
	EvalPlanQualEnd(epqstate);
	/* And set/change the plan pointer */
	epqstate->plan = subplan;
}

/*
 * EvalPlanQualAddRowMark -- add an ExecRowMark that EPQ needs to handle.
 *
 * Currently, only non-locking RowMarks are supported.
 */
void
EvalPlanQualAddRowMark(EPQState *epqstate, ExecRowMark *erm)
{
	if (RowMarkRequiresRowShareLock(erm->markType))
		elog(ERROR, "EvalPlanQual doesn't support locking rowmarks");
	epqstate->rowMarks = lappend(epqstate->rowMarks, erm);
}

/*
 * Install one test tuple into EPQ state, or clear test tuple if tuple == NULL
 *
 * NB: passed tuple must be palloc'd; it may get freed later
 */
void
EvalPlanQualSetTuple(EPQState *epqstate, Index rti, HeapTuple tuple)
{
	EState	   *estate = epqstate->estate;

	Assert(rti > 0);

	/*
	 * free old test tuple, if any, and store new tuple where relation's scan
	 * node will see it
	 */
	if (estate->es_epqTuple[rti - 1] != NULL)
		heap_freetuple(estate->es_epqTuple[rti - 1]);
	estate->es_epqTuple[rti - 1] = tuple;
	estate->es_epqTupleSet[rti - 1] = true;
}

/*
 * Fetch back the current test tuple (if any) for the specified RTI
 */
HeapTuple
EvalPlanQualGetTuple(EPQState *epqstate, Index rti)
{
	EState	   *estate = epqstate->estate;

	Assert(rti > 0);

	return estate->es_epqTuple[rti - 1];
}

/*
 * Fetch the current row values for any non-locked relations that need
 * to be scanned by an EvalPlanQual operation.  origslot must have been set
 * to contain the current result row (top-level row) that we need to recheck.
 */
void
EvalPlanQualFetchRowMarks(EPQState *epqstate)
{
	ListCell   *l;

	Assert(epqstate->origslot != NULL);

	foreach(l, epqstate->rowMarks)
	{
		ExecRowMark *erm = (ExecRowMark *) lfirst(l);
		Datum		datum;
		bool		isNull;
		HeapTupleData tuple;

		/* clear any leftover test tuple for this rel */
		EvalPlanQualSetTuple(epqstate, erm->rti, NULL);

		if (erm->relation)
		{
			Buffer		buffer;

			Assert(erm->markType == ROW_MARK_REFERENCE);

			/* if child rel, must check whether it produced this row */
			if (erm->rti != erm->prti)
			{
				Oid			tableoid;

				datum = ExecGetJunkAttribute(epqstate->origslot,
											 erm->toidAttNo,
											 &isNull);
				/* non-locked rels could be on the inside of outer joins */
				if (isNull)
					continue;
				tableoid = DatumGetObjectId(datum);

				if (tableoid != RelationGetRelid(erm->relation))
				{
					/* this child is inactive right now */
					continue;
				}
			}

			/* fetch the tuple's ctid */
			datum = ExecGetJunkAttribute(epqstate->origslot,
										 erm->ctidAttNo,
										 &isNull);
			/* non-locked rels could be on the inside of outer joins */
			if (isNull)
				continue;
			tuple.t_self = *((ItemPointer) DatumGetPointer(datum));

			/* okay, fetch the tuple */
			if (!heap_fetch(erm->relation, SnapshotAny, &tuple, &buffer,
							false, NULL))
				elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");

			/* successful, copy and store tuple */
			EvalPlanQualSetTuple(epqstate, erm->rti,
								 heap_copytuple(&tuple));
			ReleaseBuffer(buffer);
		}
		else
		{
			HeapTupleHeader td;

			Assert(erm->markType == ROW_MARK_COPY);

			/* fetch the whole-row Var for the relation */
			datum = ExecGetJunkAttribute(epqstate->origslot,
										 erm->wholeAttNo,
										 &isNull);
			/* non-locked rels could be on the inside of outer joins */
			if (isNull)
				continue;
			td = DatumGetHeapTupleHeader(datum);

			/* build a temporary HeapTuple control structure */
			tuple.t_len = HeapTupleHeaderGetDatumLength(td);
			ItemPointerSetInvalid(&(tuple.t_self));
			tuple.t_tableOid = InvalidOid;
			tuple.t_data = td;

			/* copy and store tuple */
			EvalPlanQualSetTuple(epqstate, erm->rti,
								 heap_copytuple(&tuple));
		}
	}
}
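
/*
 * Editorial note (not in the original file): the junk attribute numbers
 * consulted here (erm->ctidAttNo, erm->toidAttNo, erm->wholeAttNo) refer to
 * hidden resjunk columns that the planner appends to the top-level
 * targetlist for each row mark: a "ctid" column for ROW_MARK_REFERENCE
 * rels (plus "tableoid" when inheritance children are involved), and a
 * whole-row Var for ROW_MARK_COPY rels such as subqueries and functions.
 */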

/*
 * Fetch the next row (if any) from EvalPlanQual testing
 *
 * (In practice, there should never be more than one row...)
 */
TupleTableSlot *
EvalPlanQualNext(EPQState *epqstate)
{
	MemoryContext oldcontext;
	TupleTableSlot *slot;

	oldcontext = MemoryContextSwitchTo(epqstate->estate->es_query_cxt);
	slot = ExecProcNode(epqstate->planstate);
	MemoryContextSwitchTo(oldcontext);

	return slot;
}
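
/*
 * Editorial sketch (not in the original file): ExecLockRows in
 * nodeLockRows.c drives this machinery directly when a row it is locking
 * was concurrently updated, roughly:
 *
 *		EvalPlanQualBegin(&node->lr_epqstate, estate);
 *		...EvalPlanQualSetTuple() for each re-fetched row...
 *		EvalPlanQualFetchRowMarks(&node->lr_epqstate);
 *		slot = EvalPlanQualNext(&node->lr_epqstate);
 *		if (TupIsNull(slot))
 *			...row no longer passes the quals; skip it...
 *
 * which mirrors the sequence used by EvalPlanQual() above.
 */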

/*
 * Initialize or reset an EvalPlanQual state tree
 */
void
EvalPlanQualBegin(EPQState *epqstate, EState *parentestate)
{
	EState	   *estate = epqstate->estate;

	if (estate == NULL)
	{
		/* First time through, so create a child EState */
		EvalPlanQualStart(epqstate, parentestate, epqstate->plan);
	}
	else
	{
		/*
		 * We already have a suitable child EPQ tree, so just reset it.
		 */
		int			rtsize = list_length(parentestate->es_range_table);
		PlanState  *planstate = epqstate->planstate;

		MemSet(estate->es_epqScanDone, 0, rtsize * sizeof(bool));

		/* Recopy current values of parent parameters */
		if (parentestate->es_plannedstmt->nParamExec > 0)
		{
			int			i = parentestate->es_plannedstmt->nParamExec;

			while (--i >= 0)
			{
				/* copy value if any, but not execPlan link */
				estate->es_param_exec_vals[i].value =
					parentestate->es_param_exec_vals[i].value;
				estate->es_param_exec_vals[i].isnull =
					parentestate->es_param_exec_vals[i].isnull;
			}
		}

		/*
		 * Mark child plan tree as needing rescan at all scan nodes.  The
		 * first ExecProcNode will take care of actually doing the rescan.
		 */
		planstate->chgParam = bms_add_member(planstate->chgParam,
											 epqstate->epqParam);
	}
}

/*
 * Start execution of an EvalPlanQual plan tree.
 *
 * This is a cut-down version of ExecutorStart(): we copy some state from
 * the top-level estate rather than initializing it fresh.
 */
static void
EvalPlanQualStart(EPQState *epqstate, EState *parentestate, Plan *planTree)
{
	EState	   *estate;
	int			rtsize;
	MemoryContext oldcontext;
	ListCell   *l;

	rtsize = list_length(parentestate->es_range_table);

	epqstate->estate = estate = CreateExecutorState();

	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	/*
	 * Child EPQ EStates share the parent's copy of unchanging state such as
	 * the snapshot, rangetable, result-rel info, and external Param info.
	 * They need their own copies of local state, including a tuple table,
	 * es_param_exec_vals, etc.
	 */
	estate->es_direction = ForwardScanDirection;
	estate->es_snapshot = parentestate->es_snapshot;
	estate->es_crosscheck_snapshot = parentestate->es_crosscheck_snapshot;
	estate->es_range_table = parentestate->es_range_table;
	estate->es_plannedstmt = parentestate->es_plannedstmt;
	estate->es_junkFilter = parentestate->es_junkFilter;
	estate->es_output_cid = parentestate->es_output_cid;
	estate->es_result_relations = parentestate->es_result_relations;
	estate->es_num_result_relations = parentestate->es_num_result_relations;
	estate->es_result_relation_info = parentestate->es_result_relation_info;
	/* es_trig_target_relations must NOT be copied */
	estate->es_rowMarks = parentestate->es_rowMarks;
	estate->es_instrument = parentestate->es_instrument;
	estate->es_select_into = parentestate->es_select_into;
	estate->es_into_oids = parentestate->es_into_oids;

	/*
	 * The external param list is simply shared from parent.  The internal
	 * param workspace has to be local state, but we copy the initial values
	 * from the parent, so as to have access to any param values that were
	 * already set from other parts of the parent's plan tree.
	 */
	estate->es_param_list_info = parentestate->es_param_list_info;
	if (parentestate->es_plannedstmt->nParamExec > 0)
	{
		int			i = parentestate->es_plannedstmt->nParamExec;

		estate->es_param_exec_vals = (ParamExecData *)
			palloc0(i * sizeof(ParamExecData));
		while (--i >= 0)
		{
			/* copy value if any, but not execPlan link */
			estate->es_param_exec_vals[i].value =
				parentestate->es_param_exec_vals[i].value;
			estate->es_param_exec_vals[i].isnull =
				parentestate->es_param_exec_vals[i].isnull;
		}
	}

	/*
	 * Each EState must have its own es_epqScanDone state, but if we have
	 * nested EPQ checks they should share es_epqTuple arrays.  This allows
	 * sub-rechecks to inherit the values being examined by an outer recheck.
	 */
	estate->es_epqScanDone = (bool *) palloc0(rtsize * sizeof(bool));
	if (parentestate->es_epqTuple != NULL)
	{
		estate->es_epqTuple = parentestate->es_epqTuple;
		estate->es_epqTupleSet = parentestate->es_epqTupleSet;
	}
	else
	{
		estate->es_epqTuple = (HeapTuple *)
			palloc0(rtsize * sizeof(HeapTuple));
		estate->es_epqTupleSet = (bool *)
			palloc0(rtsize * sizeof(bool));
	}

	/*
	 * Each estate also has its own tuple table.
	 */
	estate->es_tupleTable = NIL;

	/*
	 * Initialize private state information for each SubPlan.  We must do
	 * this before running ExecInitNode on the main query tree, since
	 * ExecInitSubPlan expects to be able to find these entries.  Some of the
	 * SubPlans might not be used in the part of the plan tree we intend to
	 * run, but since it's not easy to tell which, we just initialize them
	 * all.
	 */
	Assert(estate->es_subplanstates == NIL);
	foreach(l, parentestate->es_plannedstmt->subplans)
	{
		Plan	   *subplan = (Plan *) lfirst(l);
		PlanState  *subplanstate;

		subplanstate = ExecInitNode(subplan, estate, 0);

		estate->es_subplanstates = lappend(estate->es_subplanstates,
										   subplanstate);
	}

	/*
	 * Initialize the private state information for all the nodes in the part
	 * of the plan tree we need to run.  This opens files, allocates storage
	 * and leaves us ready to start processing tuples.
	 */
	epqstate->planstate = ExecInitNode(planTree, estate, 0);

	MemoryContextSwitchTo(oldcontext);
}

/*
 * EvalPlanQualEnd -- shut down at termination of parent plan state node,
 * or if we are done with the current EPQ child.
 *
 * This is a cut-down version of ExecutorEnd(); basically we want to do most
 * of the normal cleanup, but *not* close result relations (which we are
 * just sharing from the outer query).  We do, however, have to close any
 * trigger target relations that got opened, since those are not shared.
 * (There probably shouldn't be any of the latter, but just in case...)
 */
void
EvalPlanQualEnd(EPQState *epqstate)
{
	EState	   *estate = epqstate->estate;
	MemoryContext oldcontext;
	ListCell   *l;

	if (estate == NULL)
		return;					/* idle, so nothing to do */

	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	ExecEndNode(epqstate->planstate);

	foreach(l, estate->es_subplanstates)
	{
		PlanState  *subplanstate = (PlanState *) lfirst(l);

		ExecEndNode(subplanstate);
	}

	/* throw away the per-estate tuple table */
	ExecResetTupleTable(estate->es_tupleTable, false);

	/* close any trigger target relations attached to this EState */
	foreach(l, estate->es_trig_target_relations)
	{
		ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l);

		/* Close indices and then the relation itself */
		ExecCloseIndices(resultRelInfo);
		heap_close(resultRelInfo->ri_RelationDesc, NoLock);
	}

	MemoryContextSwitchTo(oldcontext);

	FreeExecutorState(estate);

	/* Mark EPQState idle */
	epqstate->estate = NULL;
	epqstate->planstate = NULL;
	epqstate->origslot = NULL;
}
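
/*
 * Editorial summary (not in the original file): the EPQ entry points above
 * compose into a simple lifecycle for a plan node that owns an EPQState:
 *
 *		EvalPlanQualInit()				-- once, at ExecInit time
 *		  EvalPlanQualBegin()			-- per recheck; builds/resets child EState
 *		  EvalPlanQualSetTuple()		-- install the candidate tuple(s)
 *		  EvalPlanQualFetchRowMarks()	-- re-fetch non-locked source rows
 *		  EvalPlanQualNext()			-- run the recheck plan
 *		EvalPlanQualEnd()				-- at ExecEnd time, or between subplans
 */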


/*
 * Support for SELECT INTO (a/k/a CREATE TABLE AS)
 *
 * We implement SELECT INTO by diverting SELECT's normal output with
 * a specialized DestReceiver type.
 */

typedef struct
{
	DestReceiver pub;			/* publicly-known function pointers */
	EState	   *estate;			/* EState we are working with */
	Relation	rel;			/* Relation to write to */
	int			hi_options;		/* heap_insert performance options */
	BulkInsertState bistate;	/* bulk insert state */
} DR_intorel;
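
/*
 * Editorial note (not in the original file): "pub" must be the first field
 * so that a DR_intorel * can be passed around as a generic DestReceiver *
 * and cast back inside the callbacks, as intorel_receive() does below.
 * Embedding the base struct first is how dest.c implements polymorphic
 * receivers in C.
 */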

/*
 * OpenIntoRel --- actually create the SELECT INTO target relation
 *
 * This also replaces QueryDesc->dest with the special DestReceiver for
 * SELECT INTO.  We assume that the correct result tuple type has already
 * been placed in queryDesc->tupDesc.
 */
static void
OpenIntoRel(QueryDesc *queryDesc)
{
	IntoClause *into = queryDesc->plannedstmt->intoClause;
	EState	   *estate = queryDesc->estate;
	Relation	intoRelationDesc;
	char	   *intoName;
	Oid			namespaceId;
	Oid			tablespaceId;
	Datum		reloptions;
	AclResult	aclresult;
	Oid			intoRelationId;
	TupleDesc	tupdesc;
	DR_intorel *myState;
	static char *validnsps[] = HEAP_RELOPT_NAMESPACES;

	Assert(into);

	/*
	 * XXX This code needs to be kept in sync with DefineRelation().  Maybe
	 * we should try to use that function instead.
	 */

	/*
	 * Check consistency of arguments
	 */
	if (into->onCommit != ONCOMMIT_NOOP && !into->rel->istemp)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
				 errmsg("ON COMMIT can only be used on temporary tables")));

	/*
	 * Security check: disallow creating temp tables from security-restricted
	 * code.  This is needed because calling code might not expect untrusted
	 * tables to appear in pg_temp at the front of its search path.
	 */
	if (into->rel->istemp && InSecurityRestrictedOperation())
		ereport(ERROR,
				(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
				 errmsg("cannot create temporary table within security-restricted operation")));

	/*
	 * Find namespace to create in, check its permissions
	 */
	intoName = into->rel->relname;
	namespaceId = RangeVarGetCreationNamespace(into->rel);

	aclresult = pg_namespace_aclcheck(namespaceId, GetUserId(),
									  ACL_CREATE);
	if (aclresult != ACLCHECK_OK)
		aclcheck_error(aclresult, ACL_KIND_NAMESPACE,
					   get_namespace_name(namespaceId));

	/*
	 * Select tablespace to use.  If not specified, use default tablespace
	 * (which may in turn default to database's default).
	 */
	if (into->tableSpaceName)
	{
		tablespaceId = get_tablespace_oid(into->tableSpaceName, false);
	}
	else
	{
		tablespaceId = GetDefaultTablespace(into->rel->istemp);
		/* note InvalidOid is OK in this case */
	}

	/* Check permissions except when using the database's default space */
	if (OidIsValid(tablespaceId) && tablespaceId != MyDatabaseTableSpace)
	{
		AclResult	aclresult;

		aclresult = pg_tablespace_aclcheck(tablespaceId, GetUserId(),
										   ACL_CREATE);

		if (aclresult != ACLCHECK_OK)
			aclcheck_error(aclresult, ACL_KIND_TABLESPACE,
						   get_tablespace_name(tablespaceId));
	}

	/* Parse and validate any reloptions */
	reloptions = transformRelOptions((Datum) 0,
									 into->options,
									 NULL,
									 validnsps,
									 true,
									 false);
	(void) heap_reloptions(RELKIND_RELATION, reloptions, true);

	/* Copy the tupdesc because heap_create_with_catalog modifies it */
	tupdesc = CreateTupleDescCopy(queryDesc->tupDesc);

	/* Now we can actually create the new relation */
	intoRelationId = heap_create_with_catalog(intoName,
											  namespaceId,
											  tablespaceId,
											  InvalidOid,
											  InvalidOid,
											  InvalidOid,
											  GetUserId(),
											  tupdesc,
											  NIL,
											  RELKIND_RELATION,
											  false,
											  false,
											  true,
											  0,
											  into->onCommit,
											  reloptions,
											  true,
											  allowSystemTableMods,
											  false);
	Assert(intoRelationId != InvalidOid);

	FreeTupleDesc(tupdesc);

	/*
	 * Advance command counter so that the newly-created relation's catalog
	 * tuples will be visible to heap_open.
	 */
	CommandCounterIncrement();

	/*
	 * If necessary, create a TOAST table for the INTO relation.  Note that
	 * AlterTableCreateToastTable ends with CommandCounterIncrement(), so
	 * that the TOAST table will be visible for insertion.
	 */
	reloptions = transformRelOptions((Datum) 0,
									 into->options,
									 "toast",
									 validnsps,
									 true,
									 false);

	(void) heap_reloptions(RELKIND_TOASTVALUE, reloptions, true);

	AlterTableCreateToastTable(intoRelationId, reloptions);

	/*
	 * And open the constructed table for writing.
	 */
	intoRelationDesc = heap_open(intoRelationId, AccessExclusiveLock);

	/*
	 * Now replace the query's DestReceiver with one for SELECT INTO
	 */
	queryDesc->dest = CreateDestReceiver(DestIntoRel);
	myState = (DR_intorel *) queryDesc->dest;
	Assert(myState->pub.mydest == DestIntoRel);
	myState->estate = estate;
	myState->rel = intoRelationDesc;

	/*
	 * We can skip WAL-logging the insertions, unless PITR or streaming
	 * replication is in use.  We can skip the FSM in any case.
	 */
	myState->hi_options = HEAP_INSERT_SKIP_FSM |
		(XLogIsNeeded() ? 0 : HEAP_INSERT_SKIP_WAL);
	myState->bistate = GetBulkInsertState();

	/* Not using WAL requires smgr_targblock be initially invalid */
	Assert(RelationGetTargetBlock(intoRelationDesc) == InvalidBlockNumber);
}

/*
 * CloseIntoRel --- clean up SELECT INTO at ExecutorEnd time
 */
static void
CloseIntoRel(QueryDesc *queryDesc)
{
	DR_intorel *myState = (DR_intorel *) queryDesc->dest;

	/* OpenIntoRel might never have gotten called */
	if (myState && myState->pub.mydest == DestIntoRel && myState->rel)
	{
		FreeBulkInsertState(myState->bistate);

		/* If we skipped using WAL, must heap_sync before commit */
		if (myState->hi_options & HEAP_INSERT_SKIP_WAL)
			heap_sync(myState->rel);

		/* close rel, but keep lock until commit */
		heap_close(myState->rel, NoLock);

		myState->rel = NULL;
	}
}
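
/*
 * Editorial note (not in the original file): OpenIntoRel and CloseIntoRel
 * cooperate on the WAL-skip optimization.  When XLogIsNeeded() is false
 * (wal_level doesn't require WAL for archiving or streaming), inserts are
 * done with HEAP_INSERT_SKIP_WAL, and durability is instead guaranteed by
 * the heap_sync() call above, which flushes the new relation's pages to
 * disk before the creating transaction commits.
 */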

/*
 * CreateIntoRelDestReceiver -- create a suitable DestReceiver object
 */
DestReceiver *
CreateIntoRelDestReceiver(void)
{
	DR_intorel *self = (DR_intorel *) palloc0(sizeof(DR_intorel));

	self->pub.receiveSlot = intorel_receive;
	self->pub.rStartup = intorel_startup;
	self->pub.rShutdown = intorel_shutdown;
	self->pub.rDestroy = intorel_destroy;
	self->pub.mydest = DestIntoRel;

	/* private fields will be set by OpenIntoRel */

	return (DestReceiver *) self;
}
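
/*
 * Editorial sketch (not in the original file): callers don't invoke this
 * directly; OpenIntoRel above goes through the dispatcher in dest.c:
 *
 *		queryDesc->dest = CreateDestReceiver(DestIntoRel);
 *
 * which switches on the CommandDest value and calls
 * CreateIntoRelDestReceiver() for DestIntoRel.
 */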

/*
 * intorel_startup --- executor startup
 */
static void
intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
{
	/* no-op */
}

/*
 * intorel_receive --- receive one tuple
 */
static void
intorel_receive(TupleTableSlot *slot, DestReceiver *self)
{
	DR_intorel *myState = (DR_intorel *) self;
	HeapTuple	tuple;

	/*
	 * get the heap tuple out of the tuple table slot, making sure we have a
	 * writable copy
	 */
	tuple = ExecMaterializeSlot(slot);

	/*
	 * force assignment of new OID (see comments in ExecInsert)
	 */
	if (myState->rel->rd_rel->relhasoids)
		HeapTupleSetOid(tuple, InvalidOid);

	heap_insert(myState->rel,
				tuple,
				myState->estate->es_output_cid,
				myState->hi_options,
				myState->bistate);

	/* We know this is a newly created relation, so there are no indexes */
}

/*
 * intorel_shutdown --- executor end
 */
static void
intorel_shutdown(DestReceiver *self)
{
	/* no-op */
}

/*
 * intorel_destroy --- release DestReceiver object
 */
static void
intorel_destroy(DestReceiver *self)
{
	pfree(self);
}