/*-------------------------------------------------------------------------
 *
 * execMain.c
 *	  top level executor interface routines
 *
 * INTERFACE ROUTINES
 *	ExecutorStart()
 *	ExecutorRun()
 *	ExecutorEnd()
 *
 *	The old ExecutorMain() has been replaced by ExecutorStart(),
 *	ExecutorRun() and ExecutorEnd()
 *
 *	These three procedures are the external interfaces to the executor.
 *	In each case, the query descriptor is required as an argument.
 *
 *	ExecutorStart() must be called at the beginning of execution of any
 *	query plan and ExecutorEnd() should always be called at the end of
 *	execution of a plan.
 *
 *	ExecutorRun accepts direction and count arguments that specify whether
 *	the plan is to be executed forwards or backwards, and for how many
 *	tuples.
 *
 * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  src/backend/executor/execMain.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/reloptions.h"
#include "access/sysattr.h"
#include "access/transam.h"
#include "access/xact.h"
#include "catalog/heap.h"
#include "catalog/namespace.h"
#include "catalog/toasting.h"
#include "commands/tablespace.h"
#include "commands/trigger.h"
#include "executor/execdebug.h"
#include "executor/instrument.h"
#include "miscadmin.h"
#include "optimizer/clauses.h"
#include "parser/parse_clause.h"
#include "parser/parsetree.h"
#include "storage/bufmgr.h"
#include "storage/lmgr.h"
#include "storage/smgr.h"
#include "tcop/utility.h"
#include "utils/acl.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/snapmgr.h"
#include "utils/tqual.h"


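/*
 * Illustrative sketch only, not part of this file's logic: the typical
 * caller sequence for the three interface routines documented in the
 * header comment above, as a driver such as the traffic cop would use
 * them.  RunQueryToCompletion is a hypothetical name; the QueryDesc is
 * assumed to have been built beforehand with CreateQueryDesc().
 */
#ifdef EXECUTOR_USAGE_SKETCH
static void
RunQueryToCompletion(QueryDesc *queryDesc)
{
    /* Set up executor state; fills in queryDesc->tupDesc and planstate */
    ExecutorStart(queryDesc, 0);

    /* Run forward to completion; count = 0 means "no limit" */
    ExecutorRun(queryDesc, ForwardScanDirection, 0L);

    /* Shut down the plan tree and release executor-held resources */
    ExecutorEnd(queryDesc);
}
#endif   /* EXECUTOR_USAGE_SKETCH */
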
/* Hooks for plugins to get control in ExecutorStart/Run/End() */
ExecutorStart_hook_type ExecutorStart_hook = NULL;
ExecutorRun_hook_type ExecutorRun_hook = NULL;
ExecutorEnd_hook_type ExecutorEnd_hook = NULL;

/* Hook for plugin to get control in ExecCheckRTPerms() */
ExecutorCheckPerms_hook_type ExecutorCheckPerms_hook = NULL;
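
/*
 * Hedged example of how a loadable module conventionally installs one of
 * these hooks (my_ExecutorStart and prev_ExecutorStart are hypothetical
 * names; only the hook variable, its type, and standard_ExecutorStart()
 * come from this file):
 *
 *		static ExecutorStart_hook_type prev_ExecutorStart = NULL;
 *
 *		static void
 *		my_ExecutorStart(QueryDesc *queryDesc, int eflags)
 *		{
 *			... plugin work before startup ...
 *			if (prev_ExecutorStart)
 *				(*prev_ExecutorStart) (queryDesc, eflags);
 *			else
 *				standard_ExecutorStart(queryDesc, eflags);
 *		}
 *
 * and in the module's _PG_init():
 *
 *		prev_ExecutorStart = ExecutorStart_hook;
 *		ExecutorStart_hook = my_ExecutorStart;
 */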

/* decls for local routines only used within this module */
static void InitPlan(QueryDesc *queryDesc, int eflags);
static void ExecEndPlan(PlanState *planstate, EState *estate);
static void ExecutePlan(EState *estate, PlanState *planstate,
            CmdType operation,
            bool sendTuples,
            long numberTuples,
            ScanDirection direction,
            DestReceiver *dest);
static bool ExecCheckRTEPerms(RangeTblEntry *rte);
static void ExecCheckXactReadOnly(PlannedStmt *plannedstmt);
static void EvalPlanQualStart(EPQState *epqstate, EState *parentestate,
                  Plan *planTree);
static void OpenIntoRel(QueryDesc *queryDesc);
static void CloseIntoRel(QueryDesc *queryDesc);
static void intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo);
static void intorel_receive(TupleTableSlot *slot, DestReceiver *self);
static void intorel_shutdown(DestReceiver *self);
static void intorel_destroy(DestReceiver *self);

/* end of local decls */


/* ----------------------------------------------------------------
 *		ExecutorStart
 *
 *		This routine must be called at the beginning of any execution of any
 *		query plan
 *
 * Takes a QueryDesc previously created by CreateQueryDesc (it's not really
 * clear why we bother to separate the two functions, but...).  The tupDesc
 * field of the QueryDesc is filled in to describe the tuples that will be
 * returned, and the internal fields (estate and planstate) are set up.
 *
 * eflags contains flag bits as described in executor.h.
 *
 * NB: the CurrentMemoryContext when this is called will become the parent
 * of the per-query context used for this Executor invocation.
 *
 * We provide a function hook variable that lets loadable plugins
 * get control when ExecutorStart is called.  Such a plugin would
 * normally call standard_ExecutorStart().
 *
 * ----------------------------------------------------------------
 */
void
ExecutorStart(QueryDesc *queryDesc, int eflags)
{
    if (ExecutorStart_hook)
        (*ExecutorStart_hook) (queryDesc, eflags);
    else
        standard_ExecutorStart(queryDesc, eflags);
}

void
standard_ExecutorStart(QueryDesc *queryDesc, int eflags)
{
    EState     *estate;
    MemoryContext oldcontext;

    /* sanity checks: queryDesc must not be started already */
    Assert(queryDesc != NULL);
    Assert(queryDesc->estate == NULL);

    /*
     * If the transaction is read-only, we need to check if any writes are
     * planned to non-temporary tables.  EXPLAIN is considered read-only.
     */
    if (XactReadOnly && !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
        ExecCheckXactReadOnly(queryDesc->plannedstmt);

    /*
     * Build EState, switch into per-query memory context for startup.
     */
    estate = CreateExecutorState();
    queryDesc->estate = estate;

    oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

    /*
     * Fill in external parameters, if any, from queryDesc; and allocate
     * workspace for internal parameters
     */
    estate->es_param_list_info = queryDesc->params;

    if (queryDesc->plannedstmt->nParamExec > 0)
        estate->es_param_exec_vals = (ParamExecData *)
            palloc0(queryDesc->plannedstmt->nParamExec * sizeof(ParamExecData));

    /*
     * If non-read-only query, set the command ID to mark output tuples with
     */
    switch (queryDesc->operation)
    {
        case CMD_SELECT:
            /* SELECT INTO and SELECT FOR UPDATE/SHARE need to mark tuples */
            if (queryDesc->plannedstmt->intoClause != NULL ||
                queryDesc->plannedstmt->rowMarks != NIL)
                estate->es_output_cid = GetCurrentCommandId(true);
            break;

        case CMD_INSERT:
        case CMD_DELETE:
        case CMD_UPDATE:
            estate->es_output_cid = GetCurrentCommandId(true);
            break;

        default:
            elog(ERROR, "unrecognized operation code: %d",
                 (int) queryDesc->operation);
            break;
    }

    /*
     * Copy other important information into the EState
     */
    estate->es_snapshot = RegisterSnapshot(queryDesc->snapshot);
    estate->es_crosscheck_snapshot = RegisterSnapshot(queryDesc->crosscheck_snapshot);
    estate->es_instrument = queryDesc->instrument_options;

    /*
     * Initialize the plan state tree
     */
    InitPlan(queryDesc, eflags);

    MemoryContextSwitchTo(oldcontext);
}

/* ----------------------------------------------------------------
 *		ExecutorRun
 *
 *		This is the main routine of the executor module. It accepts
 *		the query descriptor from the traffic cop and executes the
 *		query plan.
 *
 *		ExecutorStart must have been called already.
 *
 *		If direction is NoMovementScanDirection then nothing is done
 *		except to start up/shut down the destination.  Otherwise,
 *		we retrieve up to 'count' tuples in the specified direction.
 *
 *		Note: count = 0 is interpreted as no portal limit, i.e., run to
 *		completion.
 *
 *		There is no return value, but output tuples (if any) are sent to
 *		the destination receiver specified in the QueryDesc; and the number
 *		of tuples processed at the top level can be found in
 *		estate->es_processed.
 *
 *		We provide a function hook variable that lets loadable plugins
 *		get control when ExecutorRun is called.  Such a plugin would
 *		normally call standard_ExecutorRun().
 *
 * ----------------------------------------------------------------
 */
void
ExecutorRun(QueryDesc *queryDesc,
            ScanDirection direction, long count)
{
    if (ExecutorRun_hook)
        (*ExecutorRun_hook) (queryDesc, direction, count);
    else
        standard_ExecutorRun(queryDesc, direction, count);
}

void
standard_ExecutorRun(QueryDesc *queryDesc,
                     ScanDirection direction, long count)
{
    EState     *estate;
    CmdType     operation;
    DestReceiver *dest;
    bool        sendTuples;
    MemoryContext oldcontext;

    /* sanity checks */
    Assert(queryDesc != NULL);

    estate = queryDesc->estate;

    Assert(estate != NULL);

    /*
     * Switch into per-query memory context
     */
    oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

    /* Allow instrumentation of ExecutorRun overall runtime */
    if (queryDesc->totaltime)
        InstrStartNode(queryDesc->totaltime);

    /*
     * extract information from the query descriptor
     */
    operation = queryDesc->operation;
    dest = queryDesc->dest;

    /*
     * startup tuple receiver, if we will be emitting tuples
     */
    estate->es_processed = 0;
    estate->es_lastoid = InvalidOid;

    sendTuples = (operation == CMD_SELECT ||
                  queryDesc->plannedstmt->hasReturning);

    if (sendTuples)
        (*dest->rStartup) (dest, operation, queryDesc->tupDesc);

    /*
     * run plan
     */
    if (!ScanDirectionIsNoMovement(direction))
        ExecutePlan(estate,
                    queryDesc->planstate,
                    operation,
                    sendTuples,
                    count,
                    direction,
                    dest);

    /*
     * shutdown tuple receiver, if we started it
     */
    if (sendTuples)
        (*dest->rShutdown) (dest);

    if (queryDesc->totaltime)
        InstrStopNode(queryDesc->totaltime, estate->es_processed);

    MemoryContextSwitchTo(oldcontext);
}
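
/*
 * Hedged sketch (hypothetical caller, not part of this file): the count
 * contract above supports portal-style batched fetching.  Because
 * standard_ExecutorRun resets es_processed on entry, a short batch tells
 * the caller that the plan is exhausted.
 */
#ifdef EXECUTOR_USAGE_SKETCH
static void
FetchInBatches(QueryDesc *queryDesc)
{
    for (;;)
    {
        /* pull at most 100 tuples; they go to queryDesc->dest */
        ExecutorRun(queryDesc, ForwardScanDirection, 100L);

        /* fewer than requested means there are no more tuples to fetch */
        if (queryDesc->estate->es_processed < 100)
            break;
    }
}
#endif   /* EXECUTOR_USAGE_SKETCH */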

/* ----------------------------------------------------------------
 *		ExecutorEnd
 *
 *		This routine must be called at the end of execution of any
 *		query plan
 *
 *		We provide a function hook variable that lets loadable plugins
 *		get control when ExecutorEnd is called.  Such a plugin would
 *		normally call standard_ExecutorEnd().
 *
 * ----------------------------------------------------------------
 */
void
ExecutorEnd(QueryDesc *queryDesc)
{
    if (ExecutorEnd_hook)
        (*ExecutorEnd_hook) (queryDesc);
    else
        standard_ExecutorEnd(queryDesc);
}

void
standard_ExecutorEnd(QueryDesc *queryDesc)
{
    EState     *estate;
    MemoryContext oldcontext;

    /* sanity checks */
    Assert(queryDesc != NULL);

    estate = queryDesc->estate;

    Assert(estate != NULL);

    /*
     * Switch into per-query memory context to run ExecEndPlan
     */
    oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

    ExecEndPlan(queryDesc->planstate, estate);

    /*
     * Close the SELECT INTO relation if any
     */
    if (estate->es_select_into)
        CloseIntoRel(queryDesc);

    /* do away with our snapshots */
    UnregisterSnapshot(estate->es_snapshot);
    UnregisterSnapshot(estate->es_crosscheck_snapshot);

    /*
     * Must switch out of context before destroying it
     */
    MemoryContextSwitchTo(oldcontext);

    /*
     * Release EState and per-query memory context.  This should release
     * everything the executor has allocated.
     */
    FreeExecutorState(estate);

    /* Reset queryDesc fields that no longer point to anything */
    queryDesc->tupDesc = NULL;
    queryDesc->estate = NULL;
    queryDesc->planstate = NULL;
    queryDesc->totaltime = NULL;
}

/* ----------------------------------------------------------------
 *		ExecutorRewind
 *
 *		This routine may be called on an open queryDesc to rewind it
 *		to the start.
 * ----------------------------------------------------------------
 */
void
ExecutorRewind(QueryDesc *queryDesc)
{
    EState     *estate;
    MemoryContext oldcontext;

    /* sanity checks */
    Assert(queryDesc != NULL);

    estate = queryDesc->estate;

    Assert(estate != NULL);

    /* It's probably not sensible to rescan updating queries */
    Assert(queryDesc->operation == CMD_SELECT);

    /*
     * Switch into per-query memory context
     */
    oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

    /*
     * rescan plan
     */
    ExecReScan(queryDesc->planstate);

    MemoryContextSwitchTo(oldcontext);
}


/*
 * ExecCheckRTPerms
 *		Check access permissions for all relations listed in a range table.
 *
 * Returns true if permissions are adequate.  Otherwise, throws an
 * appropriate error if ereport_on_violation is true, or else returns false.
 */
bool
ExecCheckRTPerms(List *rangeTable, bool ereport_on_violation)
{
    ListCell   *l;
    bool        result = true;

    foreach(l, rangeTable)
    {
        RangeTblEntry  *rte = (RangeTblEntry *) lfirst(l);

        result = ExecCheckRTEPerms(rte);
        if (!result)
        {
            Assert(rte->rtekind == RTE_RELATION);
            if (ereport_on_violation)
                aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
                               get_rel_name(rte->relid));
            return false;
        }
    }

    if (ExecutorCheckPerms_hook)
        result = (*ExecutorCheckPerms_hook)(rangeTable,
                                            ereport_on_violation);
    return result;
}
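
/*
 * Hedged sketch of an ExecutorCheckPerms_hook implementation (the names
 * below are hypothetical; only the hook's signature comes from this file).
 * Note that the hook runs only after the built-in checks have passed, so
 * it can further restrict access but never grant it.
 */
#ifdef EXECUTOR_USAGE_SKETCH
static bool
my_ExecCheckRTPerms(List *rangeTable, bool ereport_on_violation)
{
    ListCell   *l;

    foreach(l, rangeTable)
    {
        RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);

        if (rte->rtekind != RTE_RELATION)
            continue;

        /* ... consult module-specific policy for rte->relid here ... */
    }
    return true;                /* permissions are adequate */
}
#endif   /* EXECUTOR_USAGE_SKETCH */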

/*
 * ExecCheckRTEPerms
 *		Check access permissions for a single RTE.
 */
static bool
ExecCheckRTEPerms(RangeTblEntry *rte)
{
    AclMode     requiredPerms;
    AclMode     relPerms;
    AclMode     remainingPerms;
    Oid         relOid;
    Oid         userid;
    Bitmapset  *tmpset;
    int         col;

    /*
     * Only plain-relation RTEs need to be checked here.  Function RTEs are
     * checked by init_fcache when the function is prepared for execution.
     * Join, subquery, and special RTEs need no checks.
     */
    if (rte->rtekind != RTE_RELATION)
        return true;

    /*
     * No work if requiredPerms is empty.
     */
    requiredPerms = rte->requiredPerms;
    if (requiredPerms == 0)
        return true;

    relOid = rte->relid;

    /*
     * userid to check as: current user unless we have a setuid indication.
     *
     * Note: GetUserId() is presently fast enough that there's no harm in
     * calling it separately for each RTE.  If that stops being true, we could
     * call it once in ExecCheckRTPerms and pass the userid down from there.
     * But for now, no need for the extra clutter.
     */
    userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();

    /*
     * We must have *all* the requiredPerms bits, but some of the bits can be
     * satisfied from column-level rather than relation-level permissions.
     * First, remove any bits that are satisfied by relation permissions.
     */
    relPerms = pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL);
    remainingPerms = requiredPerms & ~relPerms;
    if (remainingPerms != 0)
    {
        /*
         * If we lack any permissions that exist only as relation permissions,
         * we can fail straight away.
         */
        if (remainingPerms & ~(ACL_SELECT | ACL_INSERT | ACL_UPDATE))
            return false;

        /*
         * Check to see if we have the needed privileges at column level.
         *
         * Note: failures just report a table-level error; it would be nicer
         * to report a column-level error if we have some but not all of the
         * column privileges.
         */
        if (remainingPerms & ACL_SELECT)
        {
            /*
             * When the query doesn't explicitly reference any columns (for
             * example, SELECT COUNT(*) FROM table), allow the query if we
             * have SELECT on any column of the rel, as per SQL spec.
             */
            if (bms_is_empty(rte->selectedCols))
            {
                if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
                                              ACLMASK_ANY) != ACLCHECK_OK)
                    return false;
            }

            tmpset = bms_copy(rte->selectedCols);
            while ((col = bms_first_member(tmpset)) >= 0)
            {
                /* remove the column number offset */
                col += FirstLowInvalidHeapAttributeNumber;
                if (col == InvalidAttrNumber)
                {
                    /* Whole-row reference, must have priv on all cols */
                    if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
                                                  ACLMASK_ALL) != ACLCHECK_OK)
                        return false;
                }
                else
                {
                    if (pg_attribute_aclcheck(relOid, col, userid,
                                              ACL_SELECT) != ACLCHECK_OK)
                        return false;
                }
            }
            bms_free(tmpset);
        }

        /*
         * Basically the same for the mod columns, with either INSERT or
         * UPDATE privilege as specified by remainingPerms.
         */
        remainingPerms &= ~ACL_SELECT;
        if (remainingPerms != 0)
        {
            /*
             * When the query doesn't explicitly change any columns, allow the
             * query if we have permission on any column of the rel.  This is
             * to handle SELECT FOR UPDATE as well as possible corner cases in
             * INSERT and UPDATE.
             */
            if (bms_is_empty(rte->modifiedCols))
            {
                if (pg_attribute_aclcheck_all(relOid, userid, remainingPerms,
                                              ACLMASK_ANY) != ACLCHECK_OK)
                    return false;
            }

            tmpset = bms_copy(rte->modifiedCols);
            while ((col = bms_first_member(tmpset)) >= 0)
            {
                /* remove the column number offset */
                col += FirstLowInvalidHeapAttributeNumber;
                if (col == InvalidAttrNumber)
                {
                    /* whole-row reference can't happen here */
                    elog(ERROR, "whole-row update is not implemented");
                }
                else
                {
                    if (pg_attribute_aclcheck(relOid, col, userid,
                                              remainingPerms) != ACLCHECK_OK)
                        return false;
                }
            }
            bms_free(tmpset);
        }
    }
    return true;
}
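
/*
 * Worked note on the attnum offset used in the loops above (illustrative;
 * the constant lives in access/sysattr.h): selectedCols/modifiedCols store
 * attnum - FirstLowInvalidHeapAttributeNumber so that system attributes,
 * which have negative attnums, fit in a bitmapset.  Assuming the value -8
 * from this era's headers, user column 1 is stored as bitmap member 9, and
 * a whole-row reference (attnum 0) as member 8, which the loops decode
 * back to InvalidAttrNumber.
 */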

/*
 * Check that the query does not imply any writes to non-temp tables.
 *
 * Note: in a Hot Standby slave this would need to reject writes to temp
 * tables as well; but an HS slave can't have created any temp tables
 * in the first place, so no need to check that.
 */
static void
ExecCheckXactReadOnly(PlannedStmt *plannedstmt)
{
    ListCell   *l;

    /*
     * CREATE TABLE AS or SELECT INTO?
     *
     * XXX should we allow this if the destination is temp?  Considering that
     * it would still require catalog changes, probably not.
     */
    if (plannedstmt->intoClause != NULL)
        PreventCommandIfReadOnly(CreateCommandTag((Node *) plannedstmt));

    /* Fail if write permissions are requested on any non-temp table */
    foreach(l, plannedstmt->rtable)
    {
        RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);

        if (rte->rtekind != RTE_RELATION)
            continue;

        if ((rte->requiredPerms & (~ACL_SELECT)) == 0)
            continue;

        if (isTempNamespace(get_rel_namespace(rte->relid)))
            continue;

        PreventCommandIfReadOnly(CreateCommandTag((Node *) plannedstmt));
    }
}


/* ----------------------------------------------------------------
 *		InitPlan
 *
 *		Initializes the query plan: open files, allocate storage
 *		and start up the rule manager
 * ----------------------------------------------------------------
 */
static void
InitPlan(QueryDesc *queryDesc, int eflags)
{
    CmdType     operation = queryDesc->operation;
    PlannedStmt *plannedstmt = queryDesc->plannedstmt;
    Plan       *plan = plannedstmt->planTree;
    List       *rangeTable = plannedstmt->rtable;
    EState     *estate = queryDesc->estate;
    PlanState  *planstate;
    TupleDesc   tupType;
    ListCell   *l;
    int         i;

    /*
     * Do permissions checks
     */
    ExecCheckRTPerms(rangeTable, true);

    /*
     * initialize the node's execution state
     */
    estate->es_range_table = rangeTable;
    estate->es_plannedstmt = plannedstmt;

    /*
     * initialize result relation stuff, and open/lock the result rels.
     *
     * We must do this before initializing the plan tree, else we might try to
     * do a lock upgrade if a result rel is also a source rel.
     */
    if (plannedstmt->resultRelations)
    {
        List       *resultRelations = plannedstmt->resultRelations;
        int         numResultRelations = list_length(resultRelations);
        ResultRelInfo *resultRelInfos;
        ResultRelInfo *resultRelInfo;

        resultRelInfos = (ResultRelInfo *)
            palloc(numResultRelations * sizeof(ResultRelInfo));
        resultRelInfo = resultRelInfos;
        foreach(l, resultRelations)
        {
            Index       resultRelationIndex = lfirst_int(l);
            Oid         resultRelationOid;
            Relation    resultRelation;

            resultRelationOid = getrelid(resultRelationIndex, rangeTable);
            resultRelation = heap_open(resultRelationOid, RowExclusiveLock);
            InitResultRelInfo(resultRelInfo,
                              resultRelation,
                              resultRelationIndex,
                              operation,
                              estate->es_instrument);
            resultRelInfo++;
        }
        estate->es_result_relations = resultRelInfos;
        estate->es_num_result_relations = numResultRelations;
        /* es_result_relation_info is NULL except when within ModifyTable */
        estate->es_result_relation_info = NULL;
    }
    else
    {
        /*
         * if no result relation, then set state appropriately
         */
        estate->es_result_relations = NULL;
        estate->es_num_result_relations = 0;
        estate->es_result_relation_info = NULL;
    }

    /*
     * Similarly, we have to lock relations selected FOR UPDATE/FOR SHARE
     * before we initialize the plan tree, else we'd be risking lock upgrades.
     * While we are at it, build the ExecRowMark list.
     */
    estate->es_rowMarks = NIL;
    foreach(l, plannedstmt->rowMarks)
    {
        PlanRowMark *rc = (PlanRowMark *) lfirst(l);
        Oid         relid;
        Relation    relation;
        ExecRowMark *erm;

        /* ignore "parent" rowmarks; they are irrelevant at runtime */
        if (rc->isParent)
            continue;

        switch (rc->markType)
        {
            case ROW_MARK_EXCLUSIVE:
            case ROW_MARK_SHARE:
                relid = getrelid(rc->rti, rangeTable);
                relation = heap_open(relid, RowShareLock);
                break;
            case ROW_MARK_REFERENCE:
                relid = getrelid(rc->rti, rangeTable);
                relation = heap_open(relid, AccessShareLock);
                break;
            case ROW_MARK_COPY:
                /* there's no real table here ... */
                relation = NULL;
                break;
            default:
                elog(ERROR, "unrecognized markType: %d", rc->markType);
                relation = NULL;    /* keep compiler quiet */
                break;
        }

        erm = (ExecRowMark *) palloc(sizeof(ExecRowMark));
        erm->relation = relation;
        erm->rti = rc->rti;
        erm->prti = rc->prti;
        erm->markType = rc->markType;
        erm->noWait = rc->noWait;
        erm->ctidAttNo = rc->ctidAttNo;
        erm->toidAttNo = rc->toidAttNo;
        erm->wholeAttNo = rc->wholeAttNo;
        ItemPointerSetInvalid(&(erm->curCtid));
        estate->es_rowMarks = lappend(estate->es_rowMarks, erm);
    }

    /*
     * Detect whether we're doing SELECT INTO.  If so, set the es_into_oids
     * flag appropriately so that the plan tree will be initialized with the
     * correct tuple descriptors.  (Other SELECT INTO stuff comes later.)
     */
    estate->es_select_into = false;
    if (operation == CMD_SELECT && plannedstmt->intoClause != NULL)
    {
        estate->es_select_into = true;
        estate->es_into_oids = interpretOidsOption(plannedstmt->intoClause->options);
    }

    /*
     * Initialize the executor's tuple table to empty.
     */
    estate->es_tupleTable = NIL;
    estate->es_trig_tuple_slot = NULL;
    estate->es_trig_oldtup_slot = NULL;

    /* mark EvalPlanQual not active */
    estate->es_epqTuple = NULL;
    estate->es_epqTupleSet = NULL;
    estate->es_epqScanDone = NULL;

    /*
     * Initialize private state information for each SubPlan.  We must do this
     * before running ExecInitNode on the main query tree, since
     * ExecInitSubPlan expects to be able to find these entries.
     */
    Assert(estate->es_subplanstates == NIL);
    i = 1;                      /* subplan indices count from 1 */
    foreach(l, plannedstmt->subplans)
    {
        Plan       *subplan = (Plan *) lfirst(l);
        PlanState  *subplanstate;
        int         sp_eflags;

        /*
         * A subplan will never need to do BACKWARD scan nor MARK/RESTORE. If
         * it is a parameterless subplan (not initplan), we suggest that it be
         * prepared to handle REWIND efficiently; otherwise there is no need.
         */
        sp_eflags = eflags & EXEC_FLAG_EXPLAIN_ONLY;
        if (bms_is_member(i, plannedstmt->rewindPlanIDs))
            sp_eflags |= EXEC_FLAG_REWIND;

        subplanstate = ExecInitNode(subplan, estate, sp_eflags);

        estate->es_subplanstates = lappend(estate->es_subplanstates,
                                           subplanstate);

        i++;
    }

    /*
     * Initialize the private state information for all the nodes in the query
     * tree.  This opens files, allocates storage and leaves us ready to start
     * processing tuples.
     */
    planstate = ExecInitNode(plan, estate, eflags);

    /*
     * Get the tuple descriptor describing the type of tuples to return. (this
     * is especially important if we are creating a relation with "SELECT
     * INTO")
     */
    tupType = ExecGetResultType(planstate);

    /*
     * Initialize the junk filter if needed.  SELECT queries need a filter if
     * there are any junk attrs in the top-level tlist.
     */
    if (operation == CMD_SELECT)
    {
        bool        junk_filter_needed = false;
        ListCell   *tlist;

        foreach(tlist, plan->targetlist)
        {
            TargetEntry *tle = (TargetEntry *) lfirst(tlist);

            if (tle->resjunk)
            {
                junk_filter_needed = true;
                break;
            }
        }

        if (junk_filter_needed)
        {
            JunkFilter *j;

            j = ExecInitJunkFilter(planstate->plan->targetlist,
                                   tupType->tdhasoid,
                                   ExecInitExtraTupleSlot(estate));
            estate->es_junkFilter = j;

            /* Want to return the cleaned tuple type */
            tupType = j->jf_cleanTupType;
        }
    }

    queryDesc->tupDesc = tupType;
    queryDesc->planstate = planstate;

    /*
     * If doing SELECT INTO, initialize the "into" relation.  We must wait
     * till now so we have the "clean" result tuple type to create the new
     * table from.
     *
     * If EXPLAIN, skip creating the "into" relation.
     */
    if (estate->es_select_into && !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
        OpenIntoRel(queryDesc);
}
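
/*
 * Illustrative note on the junk filter set up in InitPlan (table and
 * column names here are hypothetical): a query such as
 *
 *		SELECT name FROM emp ORDER BY salary;
 *
 * carries "salary" in the top-level targetlist as a resjunk sort key, so a
 * JunkFilter is built and tupType is replaced by the cleaned descriptor;
 * only "name" reaches the destination receiver.
 */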

/*
 * Initialize ResultRelInfo data for one result relation
 */
void
InitResultRelInfo(ResultRelInfo *resultRelInfo,
                  Relation resultRelationDesc,
                  Index resultRelationIndex,
                  CmdType operation,
                  int instrument_options)
{
    TriggerDesc *trigDesc = resultRelationDesc->trigdesc;

    /*
     * Check valid relkind ... in most cases parser and/or planner should have
     * noticed this already, but let's make sure.  In the view case we do need
     * a test here, because if the view wasn't rewritten by a rule, it had
     * better have an INSTEAD trigger.
     */
    switch (resultRelationDesc->rd_rel->relkind)
    {
        case RELKIND_RELATION:
            /* OK */
            break;
        case RELKIND_SEQUENCE:
            ereport(ERROR,
                    (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                     errmsg("cannot change sequence \"%s\"",
                            RelationGetRelationName(resultRelationDesc))));
            break;
        case RELKIND_TOASTVALUE:
            ereport(ERROR,
                    (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                     errmsg("cannot change TOAST relation \"%s\"",
                            RelationGetRelationName(resultRelationDesc))));
            break;
        case RELKIND_VIEW:
            switch (operation)
            {
                case CMD_INSERT:
                    if (!trigDesc || !trigDesc->trig_insert_instead_row)
                        ereport(ERROR,
                                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
                                 errmsg("cannot insert into view \"%s\"",
                                        RelationGetRelationName(resultRelationDesc)),
                                 errhint("You need an unconditional ON INSERT DO INSTEAD rule or an INSTEAD OF INSERT trigger.")));
                    break;
                case CMD_UPDATE:
                    if (!trigDesc || !trigDesc->trig_update_instead_row)
                        ereport(ERROR,
                                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
                                 errmsg("cannot update view \"%s\"",
                                        RelationGetRelationName(resultRelationDesc)),
                                 errhint("You need an unconditional ON UPDATE DO INSTEAD rule or an INSTEAD OF UPDATE trigger.")));
                    break;
                case CMD_DELETE:
                    if (!trigDesc || !trigDesc->trig_delete_instead_row)
                        ereport(ERROR,
                                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
                                 errmsg("cannot delete from view \"%s\"",
                                        RelationGetRelationName(resultRelationDesc)),
                                 errhint("You need an unconditional ON DELETE DO INSTEAD rule or an INSTEAD OF DELETE trigger.")));
                    break;
                default:
                    elog(ERROR, "unrecognized CmdType: %d", (int) operation);
                    break;
            }
            break;
        default:
            ereport(ERROR,
                    (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                     errmsg("cannot change relation \"%s\"",
                            RelationGetRelationName(resultRelationDesc))));
            break;
    }

    /* OK, fill in the node */
    MemSet(resultRelInfo, 0, sizeof(ResultRelInfo));
    resultRelInfo->type = T_ResultRelInfo;
    resultRelInfo->ri_RangeTableIndex = resultRelationIndex;
    resultRelInfo->ri_RelationDesc = resultRelationDesc;
    resultRelInfo->ri_NumIndices = 0;
    resultRelInfo->ri_IndexRelationDescs = NULL;
    resultRelInfo->ri_IndexRelationInfo = NULL;
    /* make a copy so as not to depend on relcache info not changing... */
    resultRelInfo->ri_TrigDesc = CopyTriggerDesc(trigDesc);
    if (resultRelInfo->ri_TrigDesc)
    {
        int         n = resultRelInfo->ri_TrigDesc->numtriggers;

        resultRelInfo->ri_TrigFunctions = (FmgrInfo *)
            palloc0(n * sizeof(FmgrInfo));
        resultRelInfo->ri_TrigWhenExprs = (List **)
            palloc0(n * sizeof(List *));
        if (instrument_options)
            resultRelInfo->ri_TrigInstrument = InstrAlloc(n, instrument_options);
    }
    else
    {
        resultRelInfo->ri_TrigFunctions = NULL;
        resultRelInfo->ri_TrigWhenExprs = NULL;
        resultRelInfo->ri_TrigInstrument = NULL;
    }
    resultRelInfo->ri_ConstraintExprs = NULL;
    resultRelInfo->ri_junkFilter = NULL;
    resultRelInfo->ri_projectReturning = NULL;

    /*
     * If there are indices on the result relation, open them and save
     * descriptors in the result relation info, so that we can add new index
     * entries for the tuples we add/update.  We need not do this for a
     * DELETE, however, since deletion doesn't affect indexes.
     */
    if (resultRelationDesc->rd_rel->relhasindex &&
        operation != CMD_DELETE)
        ExecOpenIndices(resultRelInfo);
}
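
/*
 * Illustrative note on the view case above (names are hypothetical; SQL
 * shown for context only): with this commit, a DML statement on a view is
 * accepted if the view has either an unconditional DO INSTEAD rule or the
 * matching INSTEAD OF row trigger, e.g.
 *
 *		CREATE TRIGGER my_view_ins INSTEAD OF INSERT ON my_view
 *			FOR EACH ROW EXECUTE PROCEDURE my_view_ins_func();
 *
 * Lacking both, the corresponding CMD_* branch raises the "cannot insert
 * into view" (or update/delete) error with the hint above.
 */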

/*
 *		ExecGetTriggerResultRel
 *
 * Get a ResultRelInfo for a trigger target relation.  Most of the time,
 * triggers are fired on one of the result relations of the query, and so
 * we can just return a member of the es_result_relations array.  (Note: in
 * self-join situations there might be multiple members with the same OID;
 * if so it doesn't matter which one we pick.)  However, it is sometimes
 * necessary to fire triggers on other relations; this happens mainly when an
 * RI update trigger queues additional triggers on other relations, which will
 * be processed in the context of the outer query.  For efficiency's sake,
 * we want to have a ResultRelInfo for those triggers too; that can avoid
 * repeated re-opening of the relation.  (It also provides a way for EXPLAIN
 * ANALYZE to report the runtimes of such triggers.)  So we make additional
 * ResultRelInfo's as needed, and save them in es_trig_target_relations.
 */
ResultRelInfo *
ExecGetTriggerResultRel(EState *estate, Oid relid)
{
    ResultRelInfo *rInfo;
    int         nr;
    ListCell   *l;
    Relation    rel;
    MemoryContext oldcontext;

    /* First, search through the query result relations */
    rInfo = estate->es_result_relations;
    nr = estate->es_num_result_relations;
    while (nr > 0)
    {
        if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
            return rInfo;
        rInfo++;
        nr--;
    }
    /* Nope, but maybe we already made an extra ResultRelInfo for it */
    foreach(l, estate->es_trig_target_relations)
    {
        rInfo = (ResultRelInfo *) lfirst(l);
        if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
            return rInfo;
    }
    /* Nope, so we need a new one */

    /*
     * Open the target relation's relcache entry.  We assume that an
     * appropriate lock is still held by the backend from whenever the trigger
     * event got queued, so we need take no new lock here.
     */
    rel = heap_open(relid, NoLock);

    /*
     * Make the new entry in the right context.  Currently, we don't need any
     * index information in ResultRelInfos used only for triggers, so tell
     * InitResultRelInfo it's a DELETE.
     */
    oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
    rInfo = makeNode(ResultRelInfo);
    InitResultRelInfo(rInfo,
                      rel,
                      0,        /* dummy rangetable index */
                      CMD_DELETE,
                      estate->es_instrument);
    estate->es_trig_target_relations =
        lappend(estate->es_trig_target_relations, rInfo);
    MemoryContextSwitchTo(oldcontext);

    return rInfo;
}

/*
 *		ExecContextForcesOids
 *
 * This is pretty grotty: when doing INSERT, UPDATE, or SELECT INTO,
 * we need to ensure that result tuples have space for an OID iff they are
 * going to be stored into a relation that has OIDs.  In other contexts
 * we are free to choose whether to leave space for OIDs in result tuples
 * (we generally don't want to, but we do if a physical-tlist optimization
 * is possible).  This routine checks the plan context and returns TRUE if the
 * choice is forced, FALSE if the choice is not forced.  In the TRUE case,
 * *hasoids is set to the required value.
 *
 * One reason this is ugly is that all plan nodes in the plan tree will emit
 * tuples with space for an OID, though we really only need the topmost node
 * to do so.  However, node types like Sort don't project new tuples but just
 * return their inputs, and in those cases the requirement propagates down
 * to the input node.  Eventually we might make this code smart enough to
 * recognize how far down the requirement really goes, but for now we just
 * make all plan nodes do the same thing if the top level forces the choice.
 *
 * We assume that if we are generating tuples for INSERT or UPDATE,
 * estate->es_result_relation_info is already set up to describe the target
 * relation.  Note that in an UPDATE that spans an inheritance tree, some of
 * the target relations may have OIDs and some not.  We have to make the
 * decisions on a per-relation basis as we initialize each of the subplans of
 * the ModifyTable node, so ModifyTable has to set es_result_relation_info
 * while initializing each subplan.
 *
 * SELECT INTO is even uglier, because we don't have the INTO relation's
 * descriptor available when this code runs; we have to look aside at a
 * flag set by InitPlan().
 */
bool
ExecContextForcesOids(PlanState *planstate, bool *hasoids)
{
    ResultRelInfo *ri = planstate->state->es_result_relation_info;

    if (ri != NULL)
    {
        Relation    rel = ri->ri_RelationDesc;

        if (rel != NULL)
        {
            *hasoids = rel->rd_rel->relhasoids;
            return true;
        }
    }

    if (planstate->state->es_select_into)
    {
        *hasoids = planstate->state->es_into_oids;
        return true;
    }

    return false;
}

/* ----------------------------------------------------------------
 *		ExecEndPlan
 *
 *		Cleans up the query plan -- closes files and frees up storage
 *
 * NOTE: we are no longer very worried about freeing storage per se
 * in this code; FreeExecutorState should be guaranteed to release all
 * memory that needs to be released.  What we are worried about doing
 * is closing relations and dropping buffer pins.  Thus, for example,
 * tuple tables must be cleared or dropped to ensure pins are released.
 * ----------------------------------------------------------------
 */
static void
ExecEndPlan(PlanState *planstate, EState *estate)
{
    ResultRelInfo *resultRelInfo;
    int         i;
    ListCell   *l;

    /*
     * shut down the node-type-specific query processing
     */
    ExecEndNode(planstate);

    /*
     * for subplans too
     */
    foreach(l, estate->es_subplanstates)
    {
        PlanState  *subplanstate = (PlanState *) lfirst(l);

        ExecEndNode(subplanstate);
    }

    /*
     * destroy the executor's tuple table.  Actually we only care about
     * releasing buffer pins and tupdesc refcounts; there's no need to pfree
     * the TupleTableSlots, since the containing memory context is about to go
     * away anyway.
     */
    ExecResetTupleTable(estate->es_tupleTable, false);

    /*
     * close the result relation(s) if any, but hold locks until xact commit.
     */
    resultRelInfo = estate->es_result_relations;
    for (i = estate->es_num_result_relations; i > 0; i--)
    {
        /* Close indices and then the relation itself */
        ExecCloseIndices(resultRelInfo);
        heap_close(resultRelInfo->ri_RelationDesc, NoLock);
        resultRelInfo++;
    }

    /*
     * likewise close any trigger target relations
     */
    foreach(l, estate->es_trig_target_relations)
    {
        resultRelInfo = (ResultRelInfo *) lfirst(l);
        /* Close indices and then the relation itself */
        ExecCloseIndices(resultRelInfo);
        heap_close(resultRelInfo->ri_RelationDesc, NoLock);
    }

    /*
     * close any relations selected FOR UPDATE/FOR SHARE, again keeping locks
     */
    foreach(l, estate->es_rowMarks)
    {
        ExecRowMark *erm = (ExecRowMark *) lfirst(l);

        if (erm->relation)
            heap_close(erm->relation, NoLock);
    }
}

/* ----------------------------------------------------------------
 *		ExecutePlan
 *
 *		Processes the query plan until we have processed 'numberTuples'
 *		tuples, moving in the specified direction.
 *
 *		Runs to completion if numberTuples is 0
 *
 * Note: the ctid attribute is a 'junk' attribute that is removed before the
 * user can see it
 * ----------------------------------------------------------------
 */
static void
ExecutePlan(EState *estate,
            PlanState *planstate,
            CmdType operation,
            bool sendTuples,
            long numberTuples,
            ScanDirection direction,
            DestReceiver *dest)
{
    TupleTableSlot *slot;
    long        current_tuple_count;

    /*
     * initialize local variables
     */
    current_tuple_count = 0;

    /*
     * Set the direction.
     */
    estate->es_direction = direction;

    /*
     * Loop until we've processed the proper number of tuples from the plan.
     */
    for (;;)
    {
        /* Reset the per-output-tuple exprcontext */
        ResetPerTupleExprContext(estate);

        /*
         * Execute the plan and obtain a tuple
         */
        slot = ExecProcNode(planstate);

        /*
         * if the tuple is null, then we assume there is nothing more to
         * process so we just end the loop...
         */
        if (TupIsNull(slot))
            break;

        /*
         * If we have a junk filter, then project a new tuple with the junk
         * removed.
         *
         * Store this new "clean" tuple in the junkfilter's resultSlot.
         * (Formerly, we stored it back over the "dirty" tuple, which is WRONG
         * because that tuple slot has the wrong descriptor.)
         */
        if (estate->es_junkFilter != NULL)
            slot = ExecFilterJunk(estate->es_junkFilter, slot);

        /*
         * If we are supposed to send the tuple somewhere, do so. (In
         * practice, this is probably always the case at this point.)
         */
        if (sendTuples)
            (*dest->receiveSlot) (slot, dest);

        /*
         * Count tuples processed, if this is a SELECT.  (For other operation
         * types, the ModifyTable plan node must count the appropriate
         * events.)
         */
        if (operation == CMD_SELECT)
            (estate->es_processed)++;

        /*
         * Check our tuple count.  If we've processed the proper number then
         * quit, else loop again and process more tuples.  Zero numberTuples
         * means no limit.
         */
        current_tuple_count++;
        if (numberTuples && numberTuples == current_tuple_count)
            break;
    }
}
1281
1282
1283 /*
1284  * ExecRelCheck --- check that tuple meets constraints for result relation
1285  */
1286 static const char *
1287 ExecRelCheck(ResultRelInfo *resultRelInfo,
1288                          TupleTableSlot *slot, EState *estate)
1289 {
1290         Relation        rel = resultRelInfo->ri_RelationDesc;
1291         int                     ncheck = rel->rd_att->constr->num_check;
1292         ConstrCheck *check = rel->rd_att->constr->check;
1293         ExprContext *econtext;
1294         MemoryContext oldContext;
1295         List       *qual;
1296         int                     i;
1297
1298         /*
1299          * If first time through for this result relation, build expression
1300          * nodetrees for rel's constraint expressions.  Keep them in the per-query
1301          * memory context so they'll survive throughout the query.
1302          */
1303         if (resultRelInfo->ri_ConstraintExprs == NULL)
1304         {
1305                 oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
1306                 resultRelInfo->ri_ConstraintExprs =
1307                         (List **) palloc(ncheck * sizeof(List *));
1308                 for (i = 0; i < ncheck; i++)
1309                 {
1310                         /* ExecQual wants implicit-AND form */
1311                         qual = make_ands_implicit(stringToNode(check[i].ccbin));
1312                         resultRelInfo->ri_ConstraintExprs[i] = (List *)
1313                                 ExecPrepareExpr((Expr *) qual, estate);
1314                 }
1315                 MemoryContextSwitchTo(oldContext);
1316         }
1317
1318         /*
1319          * We will use the EState's per-tuple context for evaluating constraint
1320          * expressions (creating it if it's not already there).
1321          */
1322         econtext = GetPerTupleExprContext(estate);
1323
1324         /* Arrange for econtext's scan tuple to be the tuple under test */
1325         econtext->ecxt_scantuple = slot;
1326
1327         /* And evaluate the constraints */
1328         for (i = 0; i < ncheck; i++)
1329         {
1330                 qual = resultRelInfo->ri_ConstraintExprs[i];
1331
1332                 /*
1333                  * NOTE: SQL92 specifies that a NULL result from a constraint
1334                  * expression is not to be treated as a failure.  Therefore, tell
1335                  * ExecQual to return TRUE for NULL.
1336                  */
1337                 if (!ExecQual(qual, econtext, true))
1338                         return check[i].ccname;
1339         }
1340
1341         /* NULL result means no error */
1342         return NULL;
1343 }
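/*
 * Worked example of the NULL rule noted above: for a constraint
 * CHECK (price > 0), a tuple with price = NULL makes the qual evaluate to
 * NULL rather than false; since ExecQual is called with resultForNull =
 * true, that counts as success and the tuple is accepted.
 */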
1344
1345 void
1346 ExecConstraints(ResultRelInfo *resultRelInfo,
1347                                 TupleTableSlot *slot, EState *estate)
1348 {
1349         Relation        rel = resultRelInfo->ri_RelationDesc;
1350         TupleConstr *constr = rel->rd_att->constr;
1351
1352         Assert(constr);
1353
1354         if (constr->has_not_null)
1355         {
1356                 int                     natts = rel->rd_att->natts;
1357                 int                     attrChk;
1358
1359                 for (attrChk = 1; attrChk <= natts; attrChk++)
1360                 {
1361                         if (rel->rd_att->attrs[attrChk - 1]->attnotnull &&
1362                                 slot_attisnull(slot, attrChk))
1363                                 ereport(ERROR,
1364                                                 (errcode(ERRCODE_NOT_NULL_VIOLATION),
1365                                                  errmsg("null value in column \"%s\" violates not-null constraint",
1366                                                 NameStr(rel->rd_att->attrs[attrChk - 1]->attname))));
1367                 }
1368         }
1369
1370         if (constr->num_check > 0)
1371         {
1372                 const char *failed;
1373
1374                 if ((failed = ExecRelCheck(resultRelInfo, slot, estate)) != NULL)
1375                         ereport(ERROR,
1376                                         (errcode(ERRCODE_CHECK_VIOLATION),
1377                                          errmsg("new row for relation \"%s\" violates check constraint \"%s\"",
1378                                                         RelationGetRelationName(rel), failed)));
1379         }
1380 }
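/*
 * Hedged call-site sketch: callers are expected to check that the relation
 * has any constraints before calling, in the style of the ModifyTable code
 * (which is what the Assert(constr) above relies on):
 *
 *		if (resultRelationDesc->rd_att->constr)
 *			ExecConstraints(resultRelInfo, slot, estate);
 */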
1381
1382
1383 /*
1384  * EvalPlanQual logic --- recheck modified tuple(s) to see if we want to
1385  * process the updated version under READ COMMITTED rules.
1386  *
1387  * See backend/executor/README for some info about how this works.
1388  */
1389
1390
1391 /*
1392  * Check a modified tuple to see if we want to process its updated version
1393  * under READ COMMITTED rules.
1394  *
1395  *      estate - outer executor state data
1396  *      epqstate - state for EvalPlanQual rechecking
1397  *      relation - table containing tuple
1398  *      rti - rangetable index of table containing tuple
1399  *      *tid - t_ctid from the outdated tuple (ie, next updated version)
1400  *      priorXmax - t_xmax from the outdated tuple
1401  *
1402  * *tid is also an output parameter: it's modified to hold the TID of the
1403  * latest version of the tuple (note this may be changed even on failure)
1404  *
1405  * Returns a slot containing the new candidate update/delete tuple, or
1406  * NULL if we determine we shouldn't process the row.
1407  */
1408 TupleTableSlot *
1409 EvalPlanQual(EState *estate, EPQState *epqstate,
1410                          Relation relation, Index rti,
1411                          ItemPointer tid, TransactionId priorXmax)
1412 {
1413         TupleTableSlot *slot;
1414         HeapTuple       copyTuple;
1415
1416         Assert(rti > 0);
1417
1418         /*
1419          * Get and lock the updated version of the row; if fail, return NULL.
1420          */
1421         copyTuple = EvalPlanQualFetch(estate, relation, LockTupleExclusive,
1422                                                                   tid, priorXmax);
1423
1424         if (copyTuple == NULL)
1425                 return NULL;
1426
1427         /*
1428          * For UPDATE/DELETE we have to return the tid of the actual row we're
1429          * executing EPQ for.
1430          */
1431         *tid = copyTuple->t_self;
1432
1433         /*
1434          * Need to run a recheck subquery.  Initialize or reinitialize EPQ state.
1435          */
1436         EvalPlanQualBegin(epqstate, estate);
1437
1438         /*
1439          * Free old test tuple, if any, and store new tuple where relation's scan
1440          * node will see it
1441          */
1442         EvalPlanQualSetTuple(epqstate, rti, copyTuple);
1443
1444         /*
1445          * Fetch any non-locked source rows
1446          */
1447         EvalPlanQualFetchRowMarks(epqstate);
1448
1449         /*
1450          * Run the EPQ query.  We assume it will return at most one tuple.
1451          */
1452         slot = EvalPlanQualNext(epqstate);
1453
1454         /*
1455          * If we got a tuple, force the slot to materialize the tuple so that it
1456          * is not dependent on any local state in the EPQ query (in particular,
1457          * it's highly likely that the slot contains references to any pass-by-ref
1458          * datums that may be present in copyTuple).  As with the next step, this
1459          * is to guard against early re-use of the EPQ query.
1460          */
1461         if (!TupIsNull(slot))
1462                 (void) ExecMaterializeSlot(slot);
1463
1464         /*
1465          * Clear out the test tuple.  This is needed in case the EPQ query is
1466          * re-used to test a tuple for a different relation.  (Not clear that can
1467          * really happen, but let's be safe.)
1468          */
1469         EvalPlanQualSetTuple(epqstate, rti, NULL);
1470
1471         return slot;
1472 }
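/*
 * Hedged caller sketch, modeled on the UPDATE/DELETE paths in
 * nodeModifyTable.c (variable names here are illustrative):
 *
 *		epqslot = EvalPlanQual(estate, epqstate, resultRelationDesc,
 *							   resultRelInfo->ri_RangeTableIndex,
 *							   &update_ctid, update_xmax);
 *		if (!TupIsNull(epqslot))
 *		{
 *			*tupleid = update_ctid;
 *			slot = ExecFilterJunk(junkfilter, epqslot);
 *			... retry the update using the rechecked tuple ...
 *		}
 */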
1473
1474 /*
1475  * Fetch a copy of the newest version of an outdated tuple
1476  *
1477  *      estate - executor state data
1478  *      relation - table containing tuple
1479  *      lockmode - requested tuple lock mode
1480  *      *tid - t_ctid from the outdated tuple (ie, next updated version)
1481  *      priorXmax - t_xmax from the outdated tuple
1482  *
1483  * Returns a palloc'd copy of the newest tuple version, or NULL if we find
1484  * that there is no newest version (ie, the row was deleted, not updated).
1485  * If successful, we have locked the newest tuple version, so caller does not
1486  * need to worry about it changing anymore.
1487  *
1488  * Note: properly, lockmode should be declared as enum LockTupleMode,
1489  * but we use "int" to avoid having to include heapam.h in executor.h.
1490  */
1491 HeapTuple
1492 EvalPlanQualFetch(EState *estate, Relation relation, int lockmode,
1493                                   ItemPointer tid, TransactionId priorXmax)
1494 {
1495         HeapTuple       copyTuple = NULL;
1496         HeapTupleData tuple;
1497         SnapshotData SnapshotDirty;
1498
1499         /*
1500          * fetch target tuple
1501          *
1502          * Loop here to deal with updated or busy tuples
1503          */
1504         InitDirtySnapshot(SnapshotDirty);
1505         tuple.t_self = *tid;
1506         for (;;)
1507         {
1508                 Buffer          buffer;
1509
1510                 if (heap_fetch(relation, &SnapshotDirty, &tuple, &buffer, true, NULL))
1511                 {
1512                         HTSU_Result test;
1513                         ItemPointerData update_ctid;
1514                         TransactionId update_xmax;
1515
1516                         /*
1517                          * If xmin isn't what we're expecting, the slot must have been
1518                          * recycled and reused for an unrelated tuple.  This implies that
1519                          * the latest version of the row was deleted, so we need do
1520                          * nothing.  (Should be safe to examine xmin without getting
1521                          * buffer's content lock, since xmin never changes in an existing
1522                          * tuple.)
1523                          */
1524                         if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
1525                                                                          priorXmax))
1526                         {
1527                                 ReleaseBuffer(buffer);
1528                                 return NULL;
1529                         }
1530
1531                         /* otherwise xmin should not be dirty... */
1532                         if (TransactionIdIsValid(SnapshotDirty.xmin))
1533                                 elog(ERROR, "t_xmin is uncommitted in tuple to be updated");
1534
1535                         /*
1536                          * If the tuple is being updated by another transaction, then we
1537                          * have to wait for its commit/abort.
1538                          */
1539                         if (TransactionIdIsValid(SnapshotDirty.xmax))
1540                         {
1541                                 ReleaseBuffer(buffer);
1542                                 XactLockTableWait(SnapshotDirty.xmax);
1543                                 continue;               /* loop back to repeat heap_fetch */
1544                         }
1545
1546                         /*
1547                          * If tuple was inserted by our own transaction, we have to check
1548                          * cmin against es_output_cid: cmin >= current CID means our
1549                          * command cannot see the tuple, so we should ignore it.  Without
1550                          * this we are open to the "Halloween problem" of indefinitely
1551                          * re-updating the same tuple. (We need not check cmax because
1552                          * HeapTupleSatisfiesDirty will consider a tuple deleted by our
1553                          * transaction dead, regardless of cmax.)  We just checked that
1554                          * priorXmax == xmin, so we can test that variable instead of
1555                          * doing HeapTupleHeaderGetXmin again.
1556                          */
1557                         if (TransactionIdIsCurrentTransactionId(priorXmax) &&
1558                                 HeapTupleHeaderGetCmin(tuple.t_data) >= estate->es_output_cid)
1559                         {
1560                                 ReleaseBuffer(buffer);
1561                                 return NULL;
1562                         }
1563
1564                         /*
1565                          * This is a live tuple, so now try to lock it.
1566                          */
1567                         test = heap_lock_tuple(relation, &tuple, &buffer,
1568                                                                    &update_ctid, &update_xmax,
1569                                                                    estate->es_output_cid,
1570                                                                    lockmode, false);
1571                         /* We now have two pins on the buffer, get rid of one */
1572                         ReleaseBuffer(buffer);
1573
1574                         switch (test)
1575                         {
1576                                 case HeapTupleSelfUpdated:
1577                                         /* treat it as deleted; do not process */
1578                                         ReleaseBuffer(buffer);
1579                                         return NULL;
1580
1581                                 case HeapTupleMayBeUpdated:
1582                                         /* successfully locked */
1583                                         break;
1584
1585                                 case HeapTupleUpdated:
1586                                         ReleaseBuffer(buffer);
1587                                         if (IsolationUsesXactSnapshot())
1588                                                 ereport(ERROR,
1589                                                                 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1590                                                                  errmsg("could not serialize access due to concurrent update")));
1591                                         if (!ItemPointerEquals(&update_ctid, &tuple.t_self))
1592                                         {
1593                                                 /* it was updated, so look at the updated version */
1594                                                 tuple.t_self = update_ctid;
1595                                                 /* updated row should have xmin matching this xmax */
1596                                                 priorXmax = update_xmax;
1597                                                 continue;
1598                                         }
1599                                         /* tuple was deleted, so give up */
1600                                         return NULL;
1601
1602                                 default:
1603                                         ReleaseBuffer(buffer);
1604                                         elog(ERROR, "unrecognized heap_lock_tuple status: %u",
1605                                                  test);
1606                                         return NULL;    /* keep compiler quiet */
1607                         }
1608
1609                         /*
1610                          * We got the tuple - now copy it for use by the recheck query.
1611                          */
1612                         copyTuple = heap_copytuple(&tuple);
1613                         ReleaseBuffer(buffer);
1614                         break;
1615                 }
1616
1617                 /*
1618                  * If the referenced slot was actually empty, the latest version of
1619                  * the row must have been deleted, so we need do nothing.
1620                  */
1621                 if (tuple.t_data == NULL)
1622                 {
1623                         ReleaseBuffer(buffer);
1624                         return NULL;
1625                 }
1626
1627                 /*
1628                  * As above, if xmin isn't what we're expecting, do nothing.
1629                  */
1630                 if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
1631                                                                  priorXmax))
1632                 {
1633                         ReleaseBuffer(buffer);
1634                         return NULL;
1635                 }
1636
1637                 /*
1638                  * If we get here, the tuple was found but failed SnapshotDirty.
1639                  * Assuming the xmin is either a committed xact or our own xact (as it
1640                  * certainly should be if we're trying to modify the tuple), this must
1641                  * mean that the row was updated or deleted by either a committed xact
1642                  * or our own xact.  If it was deleted, we can ignore it; if it was
1643                  * updated then chain up to the next version and repeat the whole
1644                  * process.
1645                  *
1646                  * As above, it should be safe to examine xmax and t_ctid without the
1647                  * buffer content lock, because they can't be changing.
1648                  */
1649                 if (ItemPointerEquals(&tuple.t_self, &tuple.t_data->t_ctid))
1650                 {
1651                         /* deleted, so forget about it */
1652                         ReleaseBuffer(buffer);
1653                         return NULL;
1654                 }
1655
1656                 /* updated, so look at the updated row */
1657                 tuple.t_self = tuple.t_data->t_ctid;
1658                 /* updated row should have xmin matching this xmax */
1659                 priorXmax = HeapTupleHeaderGetXmax(tuple.t_data);
1660                 ReleaseBuffer(buffer);
1661                 /* loop back to fetch next in chain */
1662         }
1663
1664         /*
1665          * Return the copied tuple
1666          */
1667         return copyTuple;
1668 }
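/*
 * Worked example of the chain-following loop above: if the row our scan saw
 * (version v1) was updated to v2 and then to v3, we start from v1's t_ctid,
 * fetch v2, find it updated in turn, and follow v2's t_ctid to v3, each time
 * requiring the fetched version's xmin to equal the prior version's xmax so
 * we know we are still on the same update chain.  The loop ends either by
 * locking and copying a live version, or by returning NULL when the chain
 * turns out to end in a delete.
 */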
1669
1670 /*
1671  * EvalPlanQualInit -- initialize during creation of a plan state node
1672  * that might need to invoke EPQ processing.
1673  * Note: subplan can be NULL if it will be set later with EvalPlanQualSetPlan.
1674  */
1675 void
1676 EvalPlanQualInit(EPQState *epqstate, EState *estate,
1677                                  Plan *subplan, int epqParam)
1678 {
1679         /* Mark the EPQ state inactive */
1680         epqstate->estate = NULL;
1681         epqstate->planstate = NULL;
1682         epqstate->origslot = NULL;
1683         /* ... and remember data that EvalPlanQualBegin will need */
1684         epqstate->plan = subplan;
1685         epqstate->rowMarks = NIL;
1686         epqstate->epqParam = epqParam;
1687 }
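/*
 * Hedged lifecycle sketch, in the style of nodeModifyTable.c (field names
 * are illustrative): a plan node that may need EPQ typically does
 *
 *		EvalPlanQualInit(&mtstate->mt_epqstate, estate, NULL, node->epqParam);
 *		...
 *		EvalPlanQualSetPlan(&mtstate->mt_epqstate, subplan);
 *		...
 *		EvalPlanQualEnd(&mtstate->mt_epqstate);
 *
 * calling SetPlan once per subplan and End at node shutdown.
 */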
1688
1689 /*
1690  * EvalPlanQualSetPlan -- set or change subplan of an EPQState.
1691  *
1692  * We need this so that ModifyTable can deal with multiple subplans.
1693  */
1694 void
1695 EvalPlanQualSetPlan(EPQState *epqstate, Plan *subplan)
1696 {
1697         /* If we have a live EPQ query, shut it down */
1698         EvalPlanQualEnd(epqstate);
1699         /* And set/change the plan pointer */
1700         epqstate->plan = subplan;
1701 }
1702
1703 /*
1704  * EvalPlanQualAddRowMark -- add an ExecRowMark that EPQ needs to handle.
1705  *
1706  * Currently, only non-locking RowMarks are supported.
1707  */
1708 void
1709 EvalPlanQualAddRowMark(EPQState *epqstate, ExecRowMark *erm)
1710 {
1711         if (RowMarkRequiresRowShareLock(erm->markType))
1712                 elog(ERROR, "EvalPlanQual doesn't support locking rowmarks");
1713         epqstate->rowMarks = lappend(epqstate->rowMarks, erm);
1714 }
1715
1716 /*
1717  * Install one test tuple into EPQ state, or clear test tuple if tuple == NULL
1718  *
1719  * NB: passed tuple must be palloc'd; it may get freed later
1720  */
1721 void
1722 EvalPlanQualSetTuple(EPQState *epqstate, Index rti, HeapTuple tuple)
1723 {
1724         EState     *estate = epqstate->estate;
1725
1726         Assert(rti > 0);
1727
1728         /*
1729          * free old test tuple, if any, and store new tuple where relation's scan
1730          * node will see it
1731          */
1732         if (estate->es_epqTuple[rti - 1] != NULL)
1733                 heap_freetuple(estate->es_epqTuple[rti - 1]);
1734         estate->es_epqTuple[rti - 1] = tuple;
1735         estate->es_epqTupleSet[rti - 1] = true;
1736 }
1737
1738 /*
1739  * Fetch back the current test tuple (if any) for the specified RTI
1740  */
1741 HeapTuple
1742 EvalPlanQualGetTuple(EPQState *epqstate, Index rti)
1743 {
1744         EState     *estate = epqstate->estate;
1745
1746         Assert(rti > 0);
1747
1748         return estate->es_epqTuple[rti - 1];
1749 }
1750
1751 /*
1752  * Fetch the current row values for any non-locked relations that need
1753  * to be scanned by an EvalPlanQual operation.  origslot must have been set
1754  * to contain the current result row (top-level row) that we need to recheck.
1755  */
1756 void
1757 EvalPlanQualFetchRowMarks(EPQState *epqstate)
1758 {
1759         ListCell   *l;
1760
1761         Assert(epqstate->origslot != NULL);
1762
1763         foreach(l, epqstate->rowMarks)
1764         {
1765                 ExecRowMark *erm = (ExecRowMark *) lfirst(l);
1766                 Datum           datum;
1767                 bool            isNull;
1768                 HeapTupleData tuple;
1769
1770                 /* clear any leftover test tuple for this rel */
1771                 EvalPlanQualSetTuple(epqstate, erm->rti, NULL);
1772
1773                 if (erm->relation)
1774                 {
1775                         Buffer          buffer;
1776
1777                         Assert(erm->markType == ROW_MARK_REFERENCE);
1778
1779                         /* if child rel, must check whether it produced this row */
1780                         if (erm->rti != erm->prti)
1781                         {
1782                                 Oid                     tableoid;
1783
1784                                 datum = ExecGetJunkAttribute(epqstate->origslot,
1785                                                                                          erm->toidAttNo,
1786                                                                                          &isNull);
1787                                 /* non-locked rels could be on the inside of outer joins */
1788                                 if (isNull)
1789                                         continue;
1790                                 tableoid = DatumGetObjectId(datum);
1791
1792                                 if (tableoid != RelationGetRelid(erm->relation))
1793                                 {
1794                                         /* this child is inactive right now */
1795                                         continue;
1796                                 }
1797                         }
1798
1799                         /* fetch the tuple's ctid */
1800                         datum = ExecGetJunkAttribute(epqstate->origslot,
1801                                                                                  erm->ctidAttNo,
1802                                                                                  &isNull);
1803                         /* non-locked rels could be on the inside of outer joins */
1804                         if (isNull)
1805                                 continue;
1806                         tuple.t_self = *((ItemPointer) DatumGetPointer(datum));
1807
1808                         /* okay, fetch the tuple */
1809                         if (!heap_fetch(erm->relation, SnapshotAny, &tuple, &buffer,
1810                                                         false, NULL))
1811                                 elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");
1812
1813                         /* successful, copy and store tuple */
1814                         EvalPlanQualSetTuple(epqstate, erm->rti,
1815                                                                  heap_copytuple(&tuple));
1816                         ReleaseBuffer(buffer);
1817                 }
1818                 else
1819                 {
1820                         HeapTupleHeader td;
1821
1822                         Assert(erm->markType == ROW_MARK_COPY);
1823
1824                         /* fetch the whole-row Var for the relation */
1825                         datum = ExecGetJunkAttribute(epqstate->origslot,
1826                                                                                  erm->wholeAttNo,
1827                                                                                  &isNull);
1828                         /* non-locked rels could be on the inside of outer joins */
1829                         if (isNull)
1830                                 continue;
1831                         td = DatumGetHeapTupleHeader(datum);
1832
1833                         /* build a temporary HeapTuple control structure */
1834                         tuple.t_len = HeapTupleHeaderGetDatumLength(td);
1835                         ItemPointerSetInvalid(&(tuple.t_self));
1836                         tuple.t_tableOid = InvalidOid;
1837                         tuple.t_data = td;
1838
1839                         /* copy and store tuple */
1840                         EvalPlanQualSetTuple(epqstate, erm->rti,
1841                                                                  heap_copytuple(&tuple));
1842                 }
1843         }
1844 }
1845
1846 /*
1847  * Fetch the next row (if any) from EvalPlanQual testing
1848  *
1849  * (In practice, there should never be more than one row...)
1850  */
1851 TupleTableSlot *
1852 EvalPlanQualNext(EPQState *epqstate)
1853 {
1854         MemoryContext oldcontext;
1855         TupleTableSlot *slot;
1856
1857         oldcontext = MemoryContextSwitchTo(epqstate->estate->es_query_cxt);
1858         slot = ExecProcNode(epqstate->planstate);
1859         MemoryContextSwitchTo(oldcontext);
1860
1861         return slot;
1862 }
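/*
 * Hedged sketch of driving this low-level EPQ API directly, in the style of
 * the row-locking code (cf. nodeLockRows.c); names are illustrative:
 *
 *		EvalPlanQualBegin(epqstate, estate);
 *		EvalPlanQualSetTuple(epqstate, erm->rti, copyTuple);
 *		epqstate->origslot = slot;
 *		EvalPlanQualFetchRowMarks(epqstate);
 *		slot = EvalPlanQualNext(epqstate);
 *		if (TupIsNull(slot))
 *			... the row fails the recheck and is skipped ...
 */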
1863
1864 /*
1865  * Initialize or reset an EvalPlanQual state tree
1866  */
1867 void
1868 EvalPlanQualBegin(EPQState *epqstate, EState *parentestate)
1869 {
1870         EState     *estate = epqstate->estate;
1871
1872         if (estate == NULL)
1873         {
1874                 /* First time through, so create a child EState */
1875                 EvalPlanQualStart(epqstate, parentestate, epqstate->plan);
1876         }
1877         else
1878         {
1879                 /*
1880                  * We already have a suitable child EPQ tree, so just reset it.
1881                  */
1882                 int                     rtsize = list_length(parentestate->es_range_table);
1883                 PlanState  *planstate = epqstate->planstate;
1884
1885                 MemSet(estate->es_epqScanDone, 0, rtsize * sizeof(bool));
1886
1887                 /* Recopy current values of parent parameters */
1888                 if (parentestate->es_plannedstmt->nParamExec > 0)
1889                 {
1890                         int                     i = parentestate->es_plannedstmt->nParamExec;
1891
1892                         while (--i >= 0)
1893                         {
1894                                 /* copy value if any, but not execPlan link */
1895                                 estate->es_param_exec_vals[i].value =
1896                                         parentestate->es_param_exec_vals[i].value;
1897                                 estate->es_param_exec_vals[i].isnull =
1898                                         parentestate->es_param_exec_vals[i].isnull;
1899                         }
1900                 }
1901
1902                 /*
1903                  * Mark child plan tree as needing rescan at all scan nodes.  The
1904                  * first ExecProcNode will take care of actually doing the rescan.
1905                  */
1906                 planstate->chgParam = bms_add_member(planstate->chgParam,
1907                                                                                          epqstate->epqParam);
1908         }
1909 }
1910
1911 /*
1912  * Start execution of an EvalPlanQual plan tree.
1913  *
1914  * This is a cut-down version of ExecutorStart(): we copy some state from
1915  * the top-level estate rather than initializing it fresh.
1916  */
1917 static void
1918 EvalPlanQualStart(EPQState *epqstate, EState *parentestate, Plan *planTree)
1919 {
1920         EState     *estate;
1921         int                     rtsize;
1922         MemoryContext oldcontext;
1923         ListCell   *l;
1924
1925         rtsize = list_length(parentestate->es_range_table);
1926
1927         epqstate->estate = estate = CreateExecutorState();
1928
1929         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
1930
1931         /*
1932          * Child EPQ EStates share the parent's copy of unchanging state such as
1933          * the snapshot, rangetable, result-rel info, and external Param info.
1934          * They need their own copies of local state, including a tuple table,
1935          * es_param_exec_vals, etc.
1936          */
1937         estate->es_direction = ForwardScanDirection;
1938         estate->es_snapshot = parentestate->es_snapshot;
1939         estate->es_crosscheck_snapshot = parentestate->es_crosscheck_snapshot;
1940         estate->es_range_table = parentestate->es_range_table;
1941         estate->es_plannedstmt = parentestate->es_plannedstmt;
1942         estate->es_junkFilter = parentestate->es_junkFilter;
1943         estate->es_output_cid = parentestate->es_output_cid;
1944         estate->es_result_relations = parentestate->es_result_relations;
1945         estate->es_num_result_relations = parentestate->es_num_result_relations;
1946         estate->es_result_relation_info = parentestate->es_result_relation_info;
1947         /* es_trig_target_relations must NOT be copied */
1948         estate->es_rowMarks = parentestate->es_rowMarks;
1949         estate->es_instrument = parentestate->es_instrument;
1950         estate->es_select_into = parentestate->es_select_into;
1951         estate->es_into_oids = parentestate->es_into_oids;
1952
1953         /*
1954          * The external param list is simply shared from parent.  The internal
1955          * param workspace has to be local state, but we copy the initial values
1956          * from the parent, so as to have access to any param values that were
1957          * already set from other parts of the parent's plan tree.
1958          */
1959         estate->es_param_list_info = parentestate->es_param_list_info;
1960         if (parentestate->es_plannedstmt->nParamExec > 0)
1961         {
1962                 int                     i = parentestate->es_plannedstmt->nParamExec;
1963
1964                 estate->es_param_exec_vals = (ParamExecData *)
1965                         palloc0(i * sizeof(ParamExecData));
1966                 while (--i >= 0)
1967                 {
1968                         /* copy value if any, but not execPlan link */
1969                         estate->es_param_exec_vals[i].value =
1970                                 parentestate->es_param_exec_vals[i].value;
1971                         estate->es_param_exec_vals[i].isnull =
1972                                 parentestate->es_param_exec_vals[i].isnull;
1973                 }
1974         }
1975
1976         /*
1977          * Each EState must have its own es_epqScanDone state, but if we have
1978          * nested EPQ checks, they should share es_epqTuple arrays.  This allows
1979          * sub-rechecks to inherit the values being examined by an outer recheck.
1980          */
1981         estate->es_epqScanDone = (bool *) palloc0(rtsize * sizeof(bool));
1982         if (parentestate->es_epqTuple != NULL)
1983         {
1984                 estate->es_epqTuple = parentestate->es_epqTuple;
1985                 estate->es_epqTupleSet = parentestate->es_epqTupleSet;
1986         }
1987         else
1988         {
1989                 estate->es_epqTuple = (HeapTuple *)
1990                         palloc0(rtsize * sizeof(HeapTuple));
1991                 estate->es_epqTupleSet = (bool *)
1992                         palloc0(rtsize * sizeof(bool));
1993         }
1994
1995         /*
1996          * Each estate also has its own tuple table.
1997          */
1998         estate->es_tupleTable = NIL;
1999
2000         /*
2001          * Initialize private state information for each SubPlan.  We must do this
2002          * before running ExecInitNode on the main query tree, since
2003          * ExecInitSubPlan expects to be able to find these entries. Some of the
2004          * SubPlans might not be used in the part of the plan tree we intend to
2005          * run, but since it's not easy to tell which, we just initialize them
2006          * all.
2007          */
2008         Assert(estate->es_subplanstates == NIL);
2009         foreach(l, parentestate->es_plannedstmt->subplans)
2010         {
2011                 Plan       *subplan = (Plan *) lfirst(l);
2012                 PlanState  *subplanstate;
2013
2014                 subplanstate = ExecInitNode(subplan, estate, 0);
2015
2016                 estate->es_subplanstates = lappend(estate->es_subplanstates,
2017                                                                                    subplanstate);
2018         }
2019
2020         /*
2021          * Initialize the private state information for all the nodes in the part
2022          * of the plan tree we need to run.  This opens files, allocates storage
2023          * and leaves us ready to start processing tuples.
2024          */
2025         epqstate->planstate = ExecInitNode(planTree, estate, 0);
2026
2027         MemoryContextSwitchTo(oldcontext);
2028 }
2029
2030 /*
2031  * EvalPlanQualEnd -- shut down at termination of parent plan state node,
2032  * or if we are done with the current EPQ child.
2033  *
2034  * This is a cut-down version of ExecutorEnd(); basically we want to do most
2035  * of the normal cleanup, but *not* close result relations (which we are
2036  * just sharing from the outer query).  We do, however, have to close any
2037  * trigger target relations that got opened, since those are not shared.
2038  * (There probably shouldn't be any of the latter, but just in case...)
2039  */
2040 void
2041 EvalPlanQualEnd(EPQState *epqstate)
2042 {
2043         EState     *estate = epqstate->estate;
2044         MemoryContext oldcontext;
2045         ListCell   *l;
2046
2047         if (estate == NULL)
2048                 return;                                 /* idle, so nothing to do */
2049
2050         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
2051
2052         ExecEndNode(epqstate->planstate);
2053
2054         foreach(l, estate->es_subplanstates)
2055         {
2056                 PlanState  *subplanstate = (PlanState *) lfirst(l);
2057
2058                 ExecEndNode(subplanstate);
2059         }
2060
2061         /* throw away the per-estate tuple table */
2062         ExecResetTupleTable(estate->es_tupleTable, false);
2063
2064         /* close any trigger target relations attached to this EState */
2065         foreach(l, estate->es_trig_target_relations)
2066         {
2067                 ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l);
2068
2069                 /* Close indices and then the relation itself */
2070                 ExecCloseIndices(resultRelInfo);
2071                 heap_close(resultRelInfo->ri_RelationDesc, NoLock);
2072         }
2073
2074         MemoryContextSwitchTo(oldcontext);
2075
2076         FreeExecutorState(estate);
2077
2078         /* Mark EPQState idle */
2079         epqstate->estate = NULL;
2080         epqstate->planstate = NULL;
2081         epqstate->origslot = NULL;
2082 }
2083
2084
2085 /*
2086  * Support for SELECT INTO (a/k/a CREATE TABLE AS)
2087  *
2088  * We implement SELECT INTO by diverting SELECT's normal output with
2089  * a specialized DestReceiver type.
2090  */
2091
2092 typedef struct
2093 {
2094         DestReceiver pub;                       /* publicly-known function pointers */
2095         EState     *estate;                     /* EState we are working with */
2096         Relation        rel;                    /* Relation to write to */
2097         int                     hi_options;             /* heap_insert performance options */
2098         BulkInsertState bistate;        /* bulk insert state */
2099 } DR_intorel;
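/*
 * Hedged sketch of how this receiver is exercised: the executor calls
 * rStartup once, then receiveSlot (intorel_receive below) once per result
 * tuple - see the (*dest->receiveSlot) (slot, dest) call in ExecutePlan
 * above - then rShutdown at executor end, and rDestroy when the receiver
 * is no longer needed.
 */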
2100
2101 /*
2102  * OpenIntoRel --- actually create the SELECT INTO target relation
2103  *
2104  * This also replaces QueryDesc->dest with the special DestReceiver for
2105  * SELECT INTO.  We assume that the correct result tuple type has already
2106  * been placed in queryDesc->tupDesc.
2107  */
2108 static void
2109 OpenIntoRel(QueryDesc *queryDesc)
2110 {
2111         IntoClause *into = queryDesc->plannedstmt->intoClause;
2112         EState     *estate = queryDesc->estate;
2113         Relation        intoRelationDesc;
2114         char       *intoName;
2115         Oid                     namespaceId;
2116         Oid                     tablespaceId;
2117         Datum           reloptions;
2118         AclResult       aclresult;
2119         Oid                     intoRelationId;
2120         TupleDesc       tupdesc;
2121         DR_intorel *myState;
2122         static char *validnsps[] = HEAP_RELOPT_NAMESPACES;
2123
2124         Assert(into);
2125
2126         /*
2127          * XXX This code needs to be kept in sync with DefineRelation(). Maybe we
2128          * should try to use that function instead.
2129          */
2130
2131         /*
2132          * Check consistency of arguments
2133          */
2134         if (into->onCommit != ONCOMMIT_NOOP && !into->rel->istemp)
2135                 ereport(ERROR,
2136                                 (errcode(ERRCODE_INVALID_TABLE_DEFINITION),
2137                                  errmsg("ON COMMIT can only be used on temporary tables")));
2138
2139         /*
2140          * Security check: disallow creating temp tables from security-restricted
2141          * code.  This is needed because calling code might not expect untrusted
2142          * tables to appear in pg_temp at the front of its search path.
2143          */
2144         if (into->rel->istemp && InSecurityRestrictedOperation())
2145                 ereport(ERROR,
2146                                 (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
2147                                  errmsg("cannot create temporary table within security-restricted operation")));
2148
2149         /*
2150          * Find namespace to create in, check its permissions
2151          */
2152         intoName = into->rel->relname;
2153         namespaceId = RangeVarGetCreationNamespace(into->rel);
2154
2155         aclresult = pg_namespace_aclcheck(namespaceId, GetUserId(),
2156                                                                           ACL_CREATE);
2157         if (aclresult != ACLCHECK_OK)
2158                 aclcheck_error(aclresult, ACL_KIND_NAMESPACE,
2159                                            get_namespace_name(namespaceId));
2160
2161         /*
2162          * Select tablespace to use.  If not specified, use default tablespace
2163          * (which may in turn default to database's default).
2164          */
2165         if (into->tableSpaceName)
2166         {
2167                 tablespaceId = get_tablespace_oid(into->tableSpaceName, false);
2168         }
2169         else
2170         {
2171                 tablespaceId = GetDefaultTablespace(into->rel->istemp);
2172                 /* note InvalidOid is OK in this case */
2173         }
2174
2175         /* Check permissions except when using the database's default space */
2176         if (OidIsValid(tablespaceId) && tablespaceId != MyDatabaseTableSpace)
2177         {
2178                 AclResult       aclresult;
2179
2180                 aclresult = pg_tablespace_aclcheck(tablespaceId, GetUserId(),
2181                                                                                    ACL_CREATE);
2182
2183                 if (aclresult != ACLCHECK_OK)
2184                         aclcheck_error(aclresult, ACL_KIND_TABLESPACE,
2185                                                    get_tablespace_name(tablespaceId));
2186         }
2187
2188         /* Parse and validate any reloptions */
2189         reloptions = transformRelOptions((Datum) 0,
2190                                                                          into->options,
2191                                                                          NULL,
2192                                                                          validnsps,
2193                                                                          true,
2194                                                                          false);
2195         (void) heap_reloptions(RELKIND_RELATION, reloptions, true);
2196
2197         /* Copy the tupdesc because heap_create_with_catalog modifies it */
2198         tupdesc = CreateTupleDescCopy(queryDesc->tupDesc);
2199
2200         /* Now we can actually create the new relation */
2201         intoRelationId = heap_create_with_catalog(intoName,
2202                                                                                           namespaceId,
2203                                                                                           tablespaceId,
2204                                                                                           InvalidOid,
2205                                                                                           InvalidOid,
2206                                                                                           InvalidOid,
2207                                                                                           GetUserId(),
2208                                                                                           tupdesc,
2209                                                                                           NIL,
2210                                                                                           RELKIND_RELATION,
2211                                                                                           false,
2212                                                                                           false,
2213                                                                                           true,
2214                                                                                           0,
2215                                                                                           into->onCommit,
2216                                                                                           reloptions,
2217                                                                                           true,
2218                                                                                           allowSystemTableMods,
2219                                                                                           false);
2220         Assert(intoRelationId != InvalidOid);
2221
2222         FreeTupleDesc(tupdesc);
2223
2224         /*
2225          * Advance command counter so that the newly-created relation's catalog
2226          * tuples will be visible to heap_open.
2227          */
2228         CommandCounterIncrement();
2229
2230         /*
2231          * If necessary, create a TOAST table for the INTO relation. Note that
2232          * AlterTableCreateToastTable ends with CommandCounterIncrement(), so that
2233          * the TOAST table will be visible for insertion.
2234          */
2235         reloptions = transformRelOptions((Datum) 0,
2236                                                                          into->options,
2237                                                                          "toast",
2238                                                                          validnsps,
2239                                                                          true,
2240                                                                          false);
2241
2242         (void) heap_reloptions(RELKIND_TOASTVALUE, reloptions, true);
2243
2244         AlterTableCreateToastTable(intoRelationId, reloptions);
2245
2246         /*
2247          * And open the constructed table for writing.
2248          */
2249         intoRelationDesc = heap_open(intoRelationId, AccessExclusiveLock);
2250
2251         /*
2252          * Now replace the query's DestReceiver with one for SELECT INTO
2253          */
2254         queryDesc->dest = CreateDestReceiver(DestIntoRel);
2255         myState = (DR_intorel *) queryDesc->dest;
2256         Assert(myState->pub.mydest == DestIntoRel);
2257         myState->estate = estate;
2258         myState->rel = intoRelationDesc;
2259
2260         /*
2261          * We can skip WAL-logging the insertions, unless PITR or streaming
2262          * replication is in use. We can skip the FSM in any case.
2263          */
2264         myState->hi_options = HEAP_INSERT_SKIP_FSM |
2265                 (XLogIsNeeded() ? 0 : HEAP_INSERT_SKIP_WAL);
2266         myState->bistate = GetBulkInsertState();
2267
2268         /* Not using WAL requires smgr_targblock be initially invalid */
2269         Assert(RelationGetTargetBlock(intoRelationDesc) == InvalidBlockNumber);
2270 }
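/*
 * Concrete reading of the hi_options logic above, assuming neither WAL
 * archiving nor streaming replication is active (XLogIsNeeded() false):
 * hi_options becomes HEAP_INSERT_SKIP_FSM | HEAP_INSERT_SKIP_WAL, and
 * CloseIntoRel below must then heap_sync the new relation before commit,
 * since its contents were never WAL-logged.
 */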
2271
2272 /*
2273  * CloseIntoRel --- clean up SELECT INTO at ExecutorEnd time
2274  */
2275 static void
2276 CloseIntoRel(QueryDesc *queryDesc)
2277 {
2278         DR_intorel *myState = (DR_intorel *) queryDesc->dest;
2279
2280         /* OpenIntoRel might never have gotten called */
2281         if (myState && myState->pub.mydest == DestIntoRel && myState->rel)
2282         {
2283                 FreeBulkInsertState(myState->bistate);
2284
2285                 /* If we skipped using WAL, must heap_sync before commit */
2286                 if (myState->hi_options & HEAP_INSERT_SKIP_WAL)
2287                         heap_sync(myState->rel);
2288
2289                 /* close rel, but keep lock until commit */
2290                 heap_close(myState->rel, NoLock);
2291
2292                 myState->rel = NULL;
2293         }
2294 }
2295
2296 /*
2297  * CreateIntoRelDestReceiver -- create a suitable DestReceiver object
2298  */
2299 DestReceiver *
2300 CreateIntoRelDestReceiver(void)
2301 {
2302         DR_intorel *self = (DR_intorel *) palloc0(sizeof(DR_intorel));
2303
2304         self->pub.receiveSlot = intorel_receive;
2305         self->pub.rStartup = intorel_startup;
2306         self->pub.rShutdown = intorel_shutdown;
2307         self->pub.rDestroy = intorel_destroy;
2308         self->pub.mydest = DestIntoRel;
2309
2310         /* private fields will be set by OpenIntoRel */
2311
2312         return (DestReceiver *) self;
2313 }
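/*
 * Hedged note on dispatch: the generic CreateDestReceiver() switch (in
 * tcop/dest.c) is expected to route DestIntoRel here, roughly as
 *
 *		case DestIntoRel:
 *			return CreateIntoRelDestReceiver();
 *
 * which is how OpenIntoRel's CreateDestReceiver(DestIntoRel) call obtains
 * a DR_intorel.
 */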
2314
2315 /*
2316  * intorel_startup --- executor startup
2317  */
2318 static void
2319 intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
2320 {
2321         /* no-op */
2322 }
2323
2324 /*
2325  * intorel_receive --- receive one tuple
2326  */
2327 static void
2328 intorel_receive(TupleTableSlot *slot, DestReceiver *self)
2329 {
2330         DR_intorel *myState = (DR_intorel *) self;
2331         HeapTuple       tuple;
2332
2333         /*
2334          * get the heap tuple out of the tuple table slot, making sure we have a
2335          * writable copy
2336          */
2337         tuple = ExecMaterializeSlot(slot);
2338
2339         /*
2340          * force assignment of new OID (see comments in ExecInsert)
2341          */
2342         if (myState->rel->rd_rel->relhasoids)
2343                 HeapTupleSetOid(tuple, InvalidOid);
2344
2345         heap_insert(myState->rel,
2346                                 tuple,
2347                                 myState->estate->es_output_cid,
2348                                 myState->hi_options,
2349                                 myState->bistate);
2350
2351         /* We know this is a newly created relation, so there are no indexes */
2352 }
2353
2354 /*
2355  * intorel_shutdown --- executor end
2356  */
2357 static void
2358 intorel_shutdown(DestReceiver *self)
2359 {
2360         /* no-op */
2361 }
2362
2363 /*
2364  * intorel_destroy --- release DestReceiver object
2365  */
2366 static void
2367 intorel_destroy(DestReceiver *self)
2368 {
2369         pfree(self);
2370 }