/*-------------------------------------------------------------------------
 *
 * execMain.c
 *      top level executor interface routines
 *
 * INTERFACE ROUTINES
 *    ExecutorStart()
 *    ExecutorRun()
 *    ExecutorEnd()
 *
 *    The old ExecutorMain() has been replaced by ExecutorStart(),
 *    ExecutorRun() and ExecutorEnd()
 *
 *    These three procedures are the external interfaces to the executor.
 *    In each case, the query descriptor is required as an argument.
 *
 *    ExecutorStart() must be called at the beginning of execution of any
 *    query plan and ExecutorEnd() should always be called at the end of
 *    execution of a plan.
 *
 *    ExecutorRun accepts direction and count arguments that specify whether
 *    the plan is to be executed forwards or backwards, and for how many
 *    tuples.
 *
 * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *      $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.340 2010/01/06 03:04:01 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/reloptions.h"
#include "access/sysattr.h"
#include "access/transam.h"
#include "access/xact.h"
#include "catalog/heap.h"
#include "catalog/namespace.h"
#include "catalog/toasting.h"
#include "commands/tablespace.h"
#include "commands/trigger.h"
#include "executor/execdebug.h"
#include "executor/instrument.h"
#include "miscadmin.h"
#include "optimizer/clauses.h"
#include "parser/parse_clause.h"
#include "parser/parsetree.h"
#include "storage/bufmgr.h"
#include "storage/lmgr.h"
#include "utils/acl.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/snapmgr.h"
#include "utils/tqual.h"


/* Hooks for plugins to get control in ExecutorStart/Run/End() */
ExecutorStart_hook_type ExecutorStart_hook = NULL;
ExecutorRun_hook_type ExecutorRun_hook = NULL;
ExecutorEnd_hook_type ExecutorEnd_hook = NULL;

/* decls for local routines only used within this module */
static void InitPlan(QueryDesc *queryDesc, int eflags);
static void ExecEndPlan(PlanState *planstate, EState *estate);
static void ExecutePlan(EState *estate, PlanState *planstate,
            CmdType operation,
            bool sendTuples,
            long numberTuples,
            ScanDirection direction,
            DestReceiver *dest);
static void ExecCheckRTPerms(List *rangeTable);
static void ExecCheckRTEPerms(RangeTblEntry *rte);
static void ExecCheckXactReadOnly(PlannedStmt *plannedstmt);
static void EvalPlanQualStart(EPQState *epqstate, EState *parentestate,
                  Plan *planTree);
static void OpenIntoRel(QueryDesc *queryDesc);
static void CloseIntoRel(QueryDesc *queryDesc);
static void intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo);
static void intorel_receive(TupleTableSlot *slot, DestReceiver *self);
static void intorel_shutdown(DestReceiver *self);
static void intorel_destroy(DestReceiver *self);

/* end of local decls */


/* ----------------------------------------------------------------
 *        ExecutorStart
 *
 *        This routine must be called at the beginning of any execution of any
 *        query plan
 *
 * Takes a QueryDesc previously created by CreateQueryDesc (it's not really
 * clear why we bother to separate the two functions, but...).  The tupDesc
 * field of the QueryDesc is filled in to describe the tuples that will be
 * returned, and the internal fields (estate and planstate) are set up.
 *
 * eflags contains flag bits as described in executor.h.
 *
 * NB: the CurrentMemoryContext when this is called will become the parent
 * of the per-query context used for this Executor invocation.
 *
 * We provide a function hook variable that lets loadable plugins
 * get control when ExecutorStart is called.  Such a plugin would
 * normally call standard_ExecutorStart().
 *
 * ----------------------------------------------------------------
 */
void
ExecutorStart(QueryDesc *queryDesc, int eflags)
{
    if (ExecutorStart_hook)
        (*ExecutorStart_hook) (queryDesc, eflags);
    else
        standard_ExecutorStart(queryDesc, eflags);
}
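
/*
 * Illustrative sketch (not part of this file): a loadable module would
 * typically install an ExecutorStart hook from its _PG_init(), saving any
 * previously installed hook and chaining to it (or to standard_ExecutorStart)
 * so that several plugins can coexist.  The names my_ExecutorStart and
 * prev_ExecutorStart below are hypothetical.
 *
 *    static ExecutorStart_hook_type prev_ExecutorStart = NULL;
 *
 *    static void
 *    my_ExecutorStart(QueryDesc *queryDesc, int eflags)
 *    {
 *        ... plugin-specific setup here ...
 *        if (prev_ExecutorStart)
 *            prev_ExecutorStart(queryDesc, eflags);
 *        else
 *            standard_ExecutorStart(queryDesc, eflags);
 *    }
 *
 *    void
 *    _PG_init(void)
 *    {
 *        prev_ExecutorStart = ExecutorStart_hook;
 *        ExecutorStart_hook = my_ExecutorStart;
 *    }
 */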

void
standard_ExecutorStart(QueryDesc *queryDesc, int eflags)
{
    EState     *estate;
    MemoryContext oldcontext;

    /* sanity checks: queryDesc must not be started already */
    Assert(queryDesc != NULL);
    Assert(queryDesc->estate == NULL);

    /*
     * If the transaction is read-only, we need to check if any writes are
     * planned to non-temporary tables.  EXPLAIN is considered read-only.
     */
    if (XactReadOnly && !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
        ExecCheckXactReadOnly(queryDesc->plannedstmt);

    /*
     * Build EState, switch into per-query memory context for startup.
     */
    estate = CreateExecutorState();
    queryDesc->estate = estate;

    oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

    /*
     * Fill in external parameters, if any, from queryDesc; and allocate
     * workspace for internal parameters
     */
    estate->es_param_list_info = queryDesc->params;

    if (queryDesc->plannedstmt->nParamExec > 0)
        estate->es_param_exec_vals = (ParamExecData *)
            palloc0(queryDesc->plannedstmt->nParamExec * sizeof(ParamExecData));

    /*
     * If non-read-only query, set the command ID to mark output tuples with
     */
    switch (queryDesc->operation)
    {
        case CMD_SELECT:
            /* SELECT INTO and SELECT FOR UPDATE/SHARE need to mark tuples */
            if (queryDesc->plannedstmt->intoClause != NULL ||
                queryDesc->plannedstmt->rowMarks != NIL)
                estate->es_output_cid = GetCurrentCommandId(true);
            break;

        case CMD_INSERT:
        case CMD_DELETE:
        case CMD_UPDATE:
            estate->es_output_cid = GetCurrentCommandId(true);
            break;

        default:
            elog(ERROR, "unrecognized operation code: %d",
                 (int) queryDesc->operation);
            break;
    }

    /*
     * Copy other important information into the EState
     */
    estate->es_snapshot = RegisterSnapshot(queryDesc->snapshot);
    estate->es_crosscheck_snapshot = RegisterSnapshot(queryDesc->crosscheck_snapshot);
    estate->es_instrument = queryDesc->instrument_options;

    /*
     * Initialize the plan state tree
     */
    InitPlan(queryDesc, eflags);

    MemoryContextSwitchTo(oldcontext);
}

/* ----------------------------------------------------------------
 *        ExecutorRun
 *
 *        This is the main routine of the executor module. It accepts
 *        the query descriptor from the traffic cop and executes the
 *        query plan.
 *
 *        ExecutorStart must have been called already.
 *
 *        If direction is NoMovementScanDirection then nothing is done
 *        except to start up/shut down the destination.  Otherwise,
 *        we retrieve up to 'count' tuples in the specified direction.
 *
 *        Note: count = 0 is interpreted as no portal limit, i.e., run to
 *        completion.
 *
 *        There is no return value, but output tuples (if any) are sent to
 *        the destination receiver specified in the QueryDesc; and the number
 *        of tuples processed at the top level can be found in
 *        estate->es_processed.
 *
 *        We provide a function hook variable that lets loadable plugins
 *        get control when ExecutorRun is called.  Such a plugin would
 *        normally call standard_ExecutorRun().
 *
 * ----------------------------------------------------------------
 */
void
ExecutorRun(QueryDesc *queryDesc,
            ScanDirection direction, long count)
{
    if (ExecutorRun_hook)
        (*ExecutorRun_hook) (queryDesc, direction, count);
    else
        standard_ExecutorRun(queryDesc, direction, count);
}

void
standard_ExecutorRun(QueryDesc *queryDesc,
                     ScanDirection direction, long count)
{
    EState     *estate;
    CmdType     operation;
    DestReceiver *dest;
    bool        sendTuples;
    MemoryContext oldcontext;

    /* sanity checks */
    Assert(queryDesc != NULL);

    estate = queryDesc->estate;

    Assert(estate != NULL);

    /*
     * Switch into per-query memory context
     */
    oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

    /* Allow instrumentation of ExecutorRun overall runtime */
    if (queryDesc->totaltime)
        InstrStartNode(queryDesc->totaltime);

    /*
     * extract information from the query descriptor
     */
    operation = queryDesc->operation;
    dest = queryDesc->dest;

    /*
     * startup tuple receiver, if we will be emitting tuples
     */
    estate->es_processed = 0;
    estate->es_lastoid = InvalidOid;

    sendTuples = (operation == CMD_SELECT ||
                  queryDesc->plannedstmt->hasReturning);

    if (sendTuples)
        (*dest->rStartup) (dest, operation, queryDesc->tupDesc);

    /*
     * run plan
     */
    if (!ScanDirectionIsNoMovement(direction))
        ExecutePlan(estate,
                    queryDesc->planstate,
                    operation,
                    sendTuples,
                    count,
                    direction,
                    dest);

    /*
     * shutdown tuple receiver, if we started it
     */
    if (sendTuples)
        (*dest->rShutdown) (dest);

    if (queryDesc->totaltime)
        InstrStopNode(queryDesc->totaltime, estate->es_processed);

    MemoryContextSwitchTo(oldcontext);
}
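
/*
 * Illustrative sketch (not part of this file): a typical caller drives the
 * executor through the full Start/Run/End sequence.  This is a minimal
 * outline assuming the caller already has a PlannedStmt, the query's source
 * text, an active snapshot, and a DestReceiver; error handling and portal
 * bookkeeping are omitted.
 *
 *    QueryDesc  *qdesc = CreateQueryDesc(plannedstmt, sourceText,
 *                                        GetActiveSnapshot(), InvalidSnapshot,
 *                                        dest, NULL, 0);
 *
 *    ExecutorStart(qdesc, 0);
 *    ExecutorRun(qdesc, ForwardScanDirection, 0L);   <- count = 0: run to completion
 *    ExecutorEnd(qdesc);
 *    FreeQueryDesc(qdesc);
 */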

/* ----------------------------------------------------------------
 *        ExecutorEnd
 *
 *        This routine must be called at the end of execution of any
 *        query plan
 *
 *        We provide a function hook variable that lets loadable plugins
 *        get control when ExecutorEnd is called.  Such a plugin would
 *        normally call standard_ExecutorEnd().
 *
 * ----------------------------------------------------------------
 */
void
ExecutorEnd(QueryDesc *queryDesc)
{
    if (ExecutorEnd_hook)
        (*ExecutorEnd_hook) (queryDesc);
    else
        standard_ExecutorEnd(queryDesc);
}

void
standard_ExecutorEnd(QueryDesc *queryDesc)
{
    EState     *estate;
    MemoryContext oldcontext;

    /* sanity checks */
    Assert(queryDesc != NULL);

    estate = queryDesc->estate;

    Assert(estate != NULL);

    /*
     * Switch into per-query memory context to run ExecEndPlan
     */
    oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

    ExecEndPlan(queryDesc->planstate, estate);

    /*
     * Close the SELECT INTO relation if any
     */
    if (estate->es_select_into)
        CloseIntoRel(queryDesc);

    /* do away with our snapshots */
    UnregisterSnapshot(estate->es_snapshot);
    UnregisterSnapshot(estate->es_crosscheck_snapshot);

    /*
     * Must switch out of context before destroying it
     */
    MemoryContextSwitchTo(oldcontext);

    /*
     * Release EState and per-query memory context.  This should release
     * everything the executor has allocated.
     */
    FreeExecutorState(estate);

    /* Reset queryDesc fields that no longer point to anything */
    queryDesc->tupDesc = NULL;
    queryDesc->estate = NULL;
    queryDesc->planstate = NULL;
    queryDesc->totaltime = NULL;
}

/* ----------------------------------------------------------------
 *        ExecutorRewind
 *
 *        This routine may be called on an open queryDesc to rewind it
 *        to the start.
 * ----------------------------------------------------------------
 */
void
ExecutorRewind(QueryDesc *queryDesc)
{
    EState     *estate;
    MemoryContext oldcontext;

    /* sanity checks */
    Assert(queryDesc != NULL);

    estate = queryDesc->estate;

    Assert(estate != NULL);

    /* It's probably not sensible to rescan updating queries */
    Assert(queryDesc->operation == CMD_SELECT);

    /*
     * Switch into per-query memory context
     */
    oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

    /*
     * rescan plan
     */
    ExecReScan(queryDesc->planstate, NULL);

    MemoryContextSwitchTo(oldcontext);
}
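
/*
 * Illustrative sketch (not part of this file): rewinding lets a caller
 * re-execute an open query from the top without tearing it down, e.g. a
 * portal replaying a scrollable cursor.  A hypothetical caller might do:
 *
 *    ExecutorRewind(queryDesc);
 *    ExecutorRun(queryDesc, ForwardScanDirection, 0L);
 *
 * Callers that intend to rewind normally start the plan with
 * EXEC_FLAG_REWIND so the plan nodes know to prepare for cheap rescans.
 */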


/*
 * ExecCheckRTPerms
 *        Check access permissions for all relations listed in a range table.
 */
static void
ExecCheckRTPerms(List *rangeTable)
{
    ListCell   *l;

    foreach(l, rangeTable)
    {
        ExecCheckRTEPerms((RangeTblEntry *) lfirst(l));
    }
}

/*
 * ExecCheckRTEPerms
 *        Check access permissions for a single RTE.
 */
static void
ExecCheckRTEPerms(RangeTblEntry *rte)
{
    AclMode     requiredPerms;
    AclMode     relPerms;
    AclMode     remainingPerms;
    Oid         relOid;
    Oid         userid;
    Bitmapset  *tmpset;
    int         col;

    /*
     * Only plain-relation RTEs need to be checked here.  Function RTEs are
     * checked by init_fcache when the function is prepared for execution.
     * Join, subquery, and special RTEs need no checks.
     */
    if (rte->rtekind != RTE_RELATION)
        return;

    /*
     * No work if requiredPerms is empty.
     */
    requiredPerms = rte->requiredPerms;
    if (requiredPerms == 0)
        return;

    relOid = rte->relid;

    /*
     * userid to check as: current user unless we have a setuid indication.
     *
     * Note: GetUserId() is presently fast enough that there's no harm in
     * calling it separately for each RTE.  If that stops being true, we could
     * call it once in ExecCheckRTPerms and pass the userid down from there.
     * But for now, no need for the extra clutter.
     */
    userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();

    /*
     * We must have *all* the requiredPerms bits, but some of the bits can be
     * satisfied from column-level rather than relation-level permissions.
     * First, remove any bits that are satisfied by relation permissions.
     */
    relPerms = pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL);
    remainingPerms = requiredPerms & ~relPerms;
    if (remainingPerms != 0)
    {
        /*
         * If we lack any permissions that exist only as relation permissions,
         * we can fail straight away.
         */
        if (remainingPerms & ~(ACL_SELECT | ACL_INSERT | ACL_UPDATE))
            aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
                           get_rel_name(relOid));

        /*
         * Check to see if we have the needed privileges at column level.
         *
         * Note: failures just report a table-level error; it would be nicer
         * to report a column-level error if we have some but not all of the
         * column privileges.
         */
        if (remainingPerms & ACL_SELECT)
        {
            /*
             * When the query doesn't explicitly reference any columns (for
             * example, SELECT COUNT(*) FROM table), allow the query if we
             * have SELECT on any column of the rel, as per SQL spec.
             */
            if (bms_is_empty(rte->selectedCols))
            {
                if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
                                              ACLMASK_ANY) != ACLCHECK_OK)
                    aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
                                   get_rel_name(relOid));
            }

            tmpset = bms_copy(rte->selectedCols);
            while ((col = bms_first_member(tmpset)) >= 0)
            {
                /* remove the column number offset */
                col += FirstLowInvalidHeapAttributeNumber;
                if (col == InvalidAttrNumber)
                {
                    /* Whole-row reference, must have priv on all cols */
                    if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
                                                  ACLMASK_ALL) != ACLCHECK_OK)
                        aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
                                       get_rel_name(relOid));
                }
                else
                {
                    if (pg_attribute_aclcheck(relOid, col, userid, ACL_SELECT)
                        != ACLCHECK_OK)
                        aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
                                       get_rel_name(relOid));
                }
            }
            bms_free(tmpset);
        }

        /*
         * Basically the same for the mod columns, with either INSERT or
         * UPDATE privilege as specified by remainingPerms.
         */
        remainingPerms &= ~ACL_SELECT;
        if (remainingPerms != 0)
        {
            /*
             * When the query doesn't explicitly change any columns, allow the
             * query if we have permission on any column of the rel.  This is
             * to handle SELECT FOR UPDATE as well as possible corner cases in
             * INSERT and UPDATE.
             */
            if (bms_is_empty(rte->modifiedCols))
            {
                if (pg_attribute_aclcheck_all(relOid, userid, remainingPerms,
                                              ACLMASK_ANY) != ACLCHECK_OK)
                    aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
                                   get_rel_name(relOid));
            }

            tmpset = bms_copy(rte->modifiedCols);
            while ((col = bms_first_member(tmpset)) >= 0)
            {
                /* remove the column number offset */
                col += FirstLowInvalidHeapAttributeNumber;
                if (col == InvalidAttrNumber)
                {
                    /* whole-row reference can't happen here */
                    elog(ERROR, "whole-row update is not implemented");
                }
                else
                {
                    if (pg_attribute_aclcheck(relOid, col, userid, remainingPerms)
                        != ACLCHECK_OK)
                        aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
                                       get_rel_name(relOid));
                }
            }
            bms_free(tmpset);
        }
    }
}
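
/*
 * Worked example (illustrative, not part of this file): selectedCols and
 * modifiedCols store attribute numbers offset by
 * FirstLowInvalidHeapAttributeNumber so that system attributes (which are
 * negative) and the whole-row reference (attnum 0) fit in a bitmapset of
 * non-negative members.  Assuming FirstLowInvalidHeapAttributeNumber is -8
 * in this release, a reference to user column 1 is stored as bit
 * 1 - (-8) = 9, and the loops above recover it as col = 9 + (-8) = 1;
 * bit 8 decodes to InvalidAttrNumber (0), the whole-row case.
 */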

/*
 * Check that the query does not imply any writes to non-temp tables.
 */
static void
ExecCheckXactReadOnly(PlannedStmt *plannedstmt)
{
    ListCell   *l;

    /*
     * CREATE TABLE AS or SELECT INTO?
     *
     * XXX should we allow this if the destination is temp?
     */
    if (plannedstmt->intoClause != NULL)
        goto fail;

    /* Fail if write permissions are requested on any non-temp table */
    foreach(l, plannedstmt->rtable)
    {
        RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);

        if (rte->rtekind != RTE_RELATION)
            continue;

        if ((rte->requiredPerms & (~ACL_SELECT)) == 0)
            continue;

        if (isTempNamespace(get_rel_namespace(rte->relid)))
            continue;

        goto fail;
    }

    return;

fail:
    ereport(ERROR,
            (errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION),
             errmsg("transaction is read-only")));
}


/* ----------------------------------------------------------------
 *        InitPlan
 *
 *        Initializes the query plan: open files, allocate storage
 *        and start up the rule manager
 * ----------------------------------------------------------------
 */
static void
InitPlan(QueryDesc *queryDesc, int eflags)
{
    CmdType     operation = queryDesc->operation;
    PlannedStmt *plannedstmt = queryDesc->plannedstmt;
    Plan       *plan = plannedstmt->planTree;
    List       *rangeTable = plannedstmt->rtable;
    EState     *estate = queryDesc->estate;
    PlanState  *planstate;
    TupleDesc   tupType;
    ListCell   *l;
    int         i;

    /*
     * Do permissions checks
     */
    ExecCheckRTPerms(rangeTable);

    /*
     * initialize the node's execution state
     */
    estate->es_range_table = rangeTable;
    estate->es_plannedstmt = plannedstmt;

    /*
     * initialize result relation stuff, and open/lock the result rels.
     *
     * We must do this before initializing the plan tree, else we might try
     * to do a lock upgrade if a result rel is also a source rel.
     */
    if (plannedstmt->resultRelations)
    {
        List       *resultRelations = plannedstmt->resultRelations;
        int         numResultRelations = list_length(resultRelations);
        ResultRelInfo *resultRelInfos;
        ResultRelInfo *resultRelInfo;

        resultRelInfos = (ResultRelInfo *)
            palloc(numResultRelations * sizeof(ResultRelInfo));
        resultRelInfo = resultRelInfos;
        foreach(l, resultRelations)
        {
            Index       resultRelationIndex = lfirst_int(l);
            Oid         resultRelationOid;
            Relation    resultRelation;

            resultRelationOid = getrelid(resultRelationIndex, rangeTable);
            resultRelation = heap_open(resultRelationOid, RowExclusiveLock);
            InitResultRelInfo(resultRelInfo,
                              resultRelation,
                              resultRelationIndex,
                              operation,
                              estate->es_instrument);
            resultRelInfo++;
        }
        estate->es_result_relations = resultRelInfos;
        estate->es_num_result_relations = numResultRelations;
        /* es_result_relation_info is NULL except when within ModifyTable */
        estate->es_result_relation_info = NULL;
    }
    else
    {
        /*
         * if no result relation, then set state appropriately
         */
        estate->es_result_relations = NULL;
        estate->es_num_result_relations = 0;
        estate->es_result_relation_info = NULL;
    }

    /*
     * Similarly, we have to lock relations selected FOR UPDATE/FOR SHARE
     * before we initialize the plan tree, else we'd be risking lock
     * upgrades.  While we are at it, build the ExecRowMark list.
     */
    estate->es_rowMarks = NIL;
    foreach(l, plannedstmt->rowMarks)
    {
        PlanRowMark *rc = (PlanRowMark *) lfirst(l);
        Oid         relid;
        Relation    relation;
        ExecRowMark *erm;

        /* ignore "parent" rowmarks; they are irrelevant at runtime */
        if (rc->isParent)
            continue;

        switch (rc->markType)
        {
            case ROW_MARK_EXCLUSIVE:
            case ROW_MARK_SHARE:
                relid = getrelid(rc->rti, rangeTable);
                relation = heap_open(relid, RowShareLock);
                break;
            case ROW_MARK_REFERENCE:
                relid = getrelid(rc->rti, rangeTable);
                relation = heap_open(relid, AccessShareLock);
                break;
            case ROW_MARK_COPY:
                /* there's no real table here ... */
                relation = NULL;
                break;
            default:
                elog(ERROR, "unrecognized markType: %d", rc->markType);
                relation = NULL;    /* keep compiler quiet */
                break;
        }

        erm = (ExecRowMark *) palloc(sizeof(ExecRowMark));
        erm->relation = relation;
        erm->rti = rc->rti;
        erm->prti = rc->prti;
        erm->markType = rc->markType;
        erm->noWait = rc->noWait;
        erm->ctidAttNo = rc->ctidAttNo;
        erm->toidAttNo = rc->toidAttNo;
        erm->wholeAttNo = rc->wholeAttNo;
        ItemPointerSetInvalid(&(erm->curCtid));
        estate->es_rowMarks = lappend(estate->es_rowMarks, erm);
    }

    /*
     * Detect whether we're doing SELECT INTO.  If so, set the es_into_oids
     * flag appropriately so that the plan tree will be initialized with the
     * correct tuple descriptors.  (Other SELECT INTO stuff comes later.)
     */
    estate->es_select_into = false;
    if (operation == CMD_SELECT && plannedstmt->intoClause != NULL)
    {
        estate->es_select_into = true;
        estate->es_into_oids = interpretOidsOption(plannedstmt->intoClause->options);
    }

    /*
     * Initialize the executor's tuple table to empty.
     */
    estate->es_tupleTable = NIL;
    estate->es_trig_tuple_slot = NULL;
    estate->es_trig_oldtup_slot = NULL;

    /* mark EvalPlanQual not active */
    estate->es_epqTuple = NULL;
    estate->es_epqTupleSet = NULL;
    estate->es_epqScanDone = NULL;

    /*
     * Initialize private state information for each SubPlan.  We must do
     * this before running ExecInitNode on the main query tree, since
     * ExecInitSubPlan expects to be able to find these entries.
     */
    Assert(estate->es_subplanstates == NIL);
    i = 1;                      /* subplan indices count from 1 */
    foreach(l, plannedstmt->subplans)
    {
        Plan       *subplan = (Plan *) lfirst(l);
        PlanState  *subplanstate;
        int         sp_eflags;

        /*
         * A subplan will never need to do BACKWARD scan nor MARK/RESTORE. If
         * it is a parameterless subplan (not initplan), we suggest that it be
         * prepared to handle REWIND efficiently; otherwise there is no need.
         */
        sp_eflags = eflags & EXEC_FLAG_EXPLAIN_ONLY;
        if (bms_is_member(i, plannedstmt->rewindPlanIDs))
            sp_eflags |= EXEC_FLAG_REWIND;

        subplanstate = ExecInitNode(subplan, estate, sp_eflags);

        estate->es_subplanstates = lappend(estate->es_subplanstates,
                                           subplanstate);

        i++;
    }

    /*
     * Initialize the private state information for all the nodes in the
     * query tree.  This opens files, allocates storage and leaves us ready
     * to start processing tuples.
     */
    planstate = ExecInitNode(plan, estate, eflags);

    /*
     * Get the tuple descriptor describing the type of tuples to return. (this
     * is especially important if we are creating a relation with "SELECT
     * INTO")
     */
    tupType = ExecGetResultType(planstate);

    /*
     * Initialize the junk filter if needed.  SELECT queries need a
     * filter if there are any junk attrs in the top-level tlist.
     */
    if (operation == CMD_SELECT)
    {
        bool        junk_filter_needed = false;
        ListCell   *tlist;

        foreach(tlist, plan->targetlist)
        {
            TargetEntry *tle = (TargetEntry *) lfirst(tlist);

            if (tle->resjunk)
            {
                junk_filter_needed = true;
                break;
            }
        }

        if (junk_filter_needed)
        {
            JunkFilter *j;

            j = ExecInitJunkFilter(planstate->plan->targetlist,
                                   tupType->tdhasoid,
                                   ExecInitExtraTupleSlot(estate));
            estate->es_junkFilter = j;

            /* Want to return the cleaned tuple type */
            tupType = j->jf_cleanTupType;
        }
    }

    queryDesc->tupDesc = tupType;
    queryDesc->planstate = planstate;

    /*
     * If doing SELECT INTO, initialize the "into" relation.  We must wait
     * till now so we have the "clean" result tuple type to create the new
     * table from.
     *
     * If EXPLAIN, skip creating the "into" relation.
     */
    if (estate->es_select_into && !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
        OpenIntoRel(queryDesc);
}

/*
 * Initialize ResultRelInfo data for one result relation
 */
void
InitResultRelInfo(ResultRelInfo *resultRelInfo,
                  Relation resultRelationDesc,
                  Index resultRelationIndex,
                  CmdType operation,
                  int instrument_options)
{
    /*
     * Check valid relkind ... parser and/or planner should have noticed this
     * already, but let's make sure.
     */
    switch (resultRelationDesc->rd_rel->relkind)
    {
        case RELKIND_RELATION:
            /* OK */
            break;
        case RELKIND_SEQUENCE:
            ereport(ERROR,
                    (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                     errmsg("cannot change sequence \"%s\"",
                            RelationGetRelationName(resultRelationDesc))));
            break;
        case RELKIND_TOASTVALUE:
            ereport(ERROR,
                    (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                     errmsg("cannot change TOAST relation \"%s\"",
                            RelationGetRelationName(resultRelationDesc))));
            break;
        case RELKIND_VIEW:
            ereport(ERROR,
                    (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                     errmsg("cannot change view \"%s\"",
                            RelationGetRelationName(resultRelationDesc))));
            break;
        default:
            ereport(ERROR,
                    (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                     errmsg("cannot change relation \"%s\"",
                            RelationGetRelationName(resultRelationDesc))));
            break;
    }

    /* OK, fill in the node */
    MemSet(resultRelInfo, 0, sizeof(ResultRelInfo));
    resultRelInfo->type = T_ResultRelInfo;
    resultRelInfo->ri_RangeTableIndex = resultRelationIndex;
    resultRelInfo->ri_RelationDesc = resultRelationDesc;
    resultRelInfo->ri_NumIndices = 0;
    resultRelInfo->ri_IndexRelationDescs = NULL;
    resultRelInfo->ri_IndexRelationInfo = NULL;
    /* make a copy so as not to depend on relcache info not changing... */
    resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc);
    if (resultRelInfo->ri_TrigDesc)
    {
        int         n = resultRelInfo->ri_TrigDesc->numtriggers;

        resultRelInfo->ri_TrigFunctions = (FmgrInfo *)
            palloc0(n * sizeof(FmgrInfo));
        resultRelInfo->ri_TrigWhenExprs = (List **)
            palloc0(n * sizeof(List *));
        if (instrument_options)
            resultRelInfo->ri_TrigInstrument = InstrAlloc(n, instrument_options);
    }
    else
    {
        resultRelInfo->ri_TrigFunctions = NULL;
        resultRelInfo->ri_TrigWhenExprs = NULL;
        resultRelInfo->ri_TrigInstrument = NULL;
    }
    resultRelInfo->ri_ConstraintExprs = NULL;
    resultRelInfo->ri_junkFilter = NULL;
    resultRelInfo->ri_projectReturning = NULL;

    /*
     * If there are indices on the result relation, open them and save
     * descriptors in the result relation info, so that we can add new index
     * entries for the tuples we add/update.  We need not do this for a
     * DELETE, however, since deletion doesn't affect indexes.
     */
    if (resultRelationDesc->rd_rel->relhasindex &&
        operation != CMD_DELETE)
        ExecOpenIndices(resultRelInfo);
}

/*
 *        ExecGetTriggerResultRel
 *
 * Get a ResultRelInfo for a trigger target relation.  Most of the time,
 * triggers are fired on one of the result relations of the query, and so
 * we can just return a member of the es_result_relations array.  (Note: in
 * self-join situations there might be multiple members with the same OID;
 * if so it doesn't matter which one we pick.)  However, it is sometimes
 * necessary to fire triggers on other relations; this happens mainly when an
 * RI update trigger queues additional triggers on other relations, which will
 * be processed in the context of the outer query.  For efficiency's sake,
 * we want to have a ResultRelInfo for those triggers too; that can avoid
 * repeated re-opening of the relation.  (It also provides a way for EXPLAIN
 * ANALYZE to report the runtimes of such triggers.)  So we make additional
 * ResultRelInfo's as needed, and save them in es_trig_target_relations.
 */
ResultRelInfo *
ExecGetTriggerResultRel(EState *estate, Oid relid)
{
    ResultRelInfo *rInfo;
    int         nr;
    ListCell   *l;
    Relation    rel;
    MemoryContext oldcontext;

    /* First, search through the query result relations */
    rInfo = estate->es_result_relations;
    nr = estate->es_num_result_relations;
    while (nr > 0)
    {
        if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
            return rInfo;
        rInfo++;
        nr--;
    }
    /* Nope, but maybe we already made an extra ResultRelInfo for it */
    foreach(l, estate->es_trig_target_relations)
    {
        rInfo = (ResultRelInfo *) lfirst(l);
        if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
            return rInfo;
    }
    /* Nope, so we need a new one */

    /*
     * Open the target relation's relcache entry.  We assume that an
     * appropriate lock is still held by the backend from whenever the trigger
     * event got queued, so we need take no new lock here.
     */
    rel = heap_open(relid, NoLock);

    /*
     * Make the new entry in the right context.  Currently, we don't need any
     * index information in ResultRelInfos used only for triggers, so tell
     * InitResultRelInfo it's a DELETE.
     */
    oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
    rInfo = makeNode(ResultRelInfo);
    InitResultRelInfo(rInfo,
                      rel,
                      0,        /* dummy rangetable index */
                      CMD_DELETE,
                      estate->es_instrument);
    estate->es_trig_target_relations =
        lappend(estate->es_trig_target_relations, rInfo);
    MemoryContextSwitchTo(oldcontext);

    return rInfo;
}

/*
 *        ExecContextForcesOids
 *
 * This is pretty grotty: when doing INSERT, UPDATE, or SELECT INTO,
 * we need to ensure that result tuples have space for an OID iff they are
 * going to be stored into a relation that has OIDs.  In other contexts
 * we are free to choose whether to leave space for OIDs in result tuples
 * (we generally don't want to, but we do if a physical-tlist optimization
 * is possible).  This routine checks the plan context and returns TRUE if the
 * choice is forced, FALSE if the choice is not forced.  In the TRUE case,
 * *hasoids is set to the required value.
 *
 * One reason this is ugly is that all plan nodes in the plan tree will emit
 * tuples with space for an OID, though we really only need the topmost node
 * to do so.  However, node types like Sort don't project new tuples but just
 * return their inputs, and in those cases the requirement propagates down
 * to the input node.  Eventually we might make this code smart enough to
 * recognize how far down the requirement really goes, but for now we just
 * make all plan nodes do the same thing if the top level forces the choice.
 *
 * We assume that if we are generating tuples for INSERT or UPDATE,
 * estate->es_result_relation_info is already set up to describe the target
 * relation.  Note that in an UPDATE that spans an inheritance tree, some of
 * the target relations may have OIDs and some not.  We have to make the
 * decisions on a per-relation basis as we initialize each of the subplans of
 * the ModifyTable node, so ModifyTable has to set es_result_relation_info
 * while initializing each subplan.
 *
 * SELECT INTO is even uglier, because we don't have the INTO relation's
 * descriptor available when this code runs; we have to look aside at a
 * flag set by InitPlan().
 */
bool
ExecContextForcesOids(PlanState *planstate, bool *hasoids)
{
    ResultRelInfo *ri = planstate->state->es_result_relation_info;

    if (ri != NULL)
    {
        Relation    rel = ri->ri_RelationDesc;

        if (rel != NULL)
        {
            *hasoids = rel->rd_rel->relhasoids;
            return true;
        }
    }

    if (planstate->state->es_select_into)
    {
        *hasoids = planstate->state->es_into_oids;
        return true;
    }

    return false;
}
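
/*
 * Illustrative sketch (not part of this file): a plan node deciding whether
 * its result tuples need an OID slot would consult this function roughly as
 * follows (the local variable name is hypothetical):
 *
 *    bool    hasoid;
 *
 *    if (!ExecContextForcesOids(planstate, &hasoid))
 *        hasoid = false;    <- free to choose, so don't leave space for OIDs
 */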

/* ----------------------------------------------------------------
 *        ExecEndPlan
 *
 *        Cleans up the query plan -- closes files and frees up storage
 *
 * NOTE: we are no longer very worried about freeing storage per se
 * in this code; FreeExecutorState should be guaranteed to release all
 * memory that needs to be released.  What we are worried about doing
 * is closing relations and dropping buffer pins.  Thus, for example,
 * tuple tables must be cleared or dropped to ensure pins are released.
 * ----------------------------------------------------------------
 */
static void
ExecEndPlan(PlanState *planstate, EState *estate)
{
    ResultRelInfo *resultRelInfo;
    int         i;
    ListCell   *l;

    /*
     * shut down the node-type-specific query processing
     */
    ExecEndNode(planstate);

    /*
     * for subplans too
     */
    foreach(l, estate->es_subplanstates)
    {
        PlanState  *subplanstate = (PlanState *) lfirst(l);

        ExecEndNode(subplanstate);
    }

    /*
     * destroy the executor's tuple table.  Actually we only care about
     * releasing buffer pins and tupdesc refcounts; there's no need to
     * pfree the TupleTableSlots, since the containing memory context
     * is about to go away anyway.
     */
    ExecResetTupleTable(estate->es_tupleTable, false);

    /*
     * close the result relation(s) if any, but hold locks until xact commit.
     */
    resultRelInfo = estate->es_result_relations;
    for (i = estate->es_num_result_relations; i > 0; i--)
    {
        /* Close indices and then the relation itself */
        ExecCloseIndices(resultRelInfo);
        heap_close(resultRelInfo->ri_RelationDesc, NoLock);
        resultRelInfo++;
    }

    /*
     * likewise close any trigger target relations
     */
    foreach(l, estate->es_trig_target_relations)
    {
        resultRelInfo = (ResultRelInfo *) lfirst(l);
        /* Close indices and then the relation itself */
        ExecCloseIndices(resultRelInfo);
        heap_close(resultRelInfo->ri_RelationDesc, NoLock);
    }

    /*
     * close any relations selected FOR UPDATE/FOR SHARE, again keeping locks
     */
    foreach(l, estate->es_rowMarks)
    {
        ExecRowMark *erm = (ExecRowMark *) lfirst(l);

        if (erm->relation)
            heap_close(erm->relation, NoLock);
    }
}

/* ----------------------------------------------------------------
 *        ExecutePlan
 *
 *        Processes the query plan until we have processed 'numberTuples'
 *        tuples, moving in the specified direction.
 *
 *        Runs to completion if numberTuples is 0
 *
 * Note: the ctid attribute is a 'junk' attribute that is removed before the
 * user can see it
 * ----------------------------------------------------------------
 */
static void
ExecutePlan(EState *estate,
            PlanState *planstate,
            CmdType operation,
            bool sendTuples,
            long numberTuples,
            ScanDirection direction,
            DestReceiver *dest)
{
    TupleTableSlot *slot;
    long        current_tuple_count;

    /*
     * initialize local variables
     */
    current_tuple_count = 0;

    /*
     * Set the direction.
     */
    estate->es_direction = direction;

    /*
     * Loop until we've processed the proper number of tuples from the plan.
     */
    for (;;)
    {
        /* Reset the per-output-tuple exprcontext */
        ResetPerTupleExprContext(estate);

        /*
         * Execute the plan and obtain a tuple
         */
        slot = ExecProcNode(planstate);

        /*
         * if the tuple is null, then we assume there is nothing more to
         * process so we just end the loop...
         */
        if (TupIsNull(slot))
            break;

        /*
         * If we have a junk filter, then project a new tuple with the junk
         * removed.
         *
         * Store this new "clean" tuple in the junkfilter's resultSlot.
         * (Formerly, we stored it back over the "dirty" tuple, which is WRONG
         * because that tuple slot has the wrong descriptor.)
         */
        if (estate->es_junkFilter != NULL)
            slot = ExecFilterJunk(estate->es_junkFilter, slot);

        /*
         * If we are supposed to send the tuple somewhere, do so.
         * (In practice, this is probably always the case at this point.)
         */
        if (sendTuples)
            (*dest->receiveSlot) (slot, dest);

        /*
         * Count tuples processed, if this is a SELECT.  (For other operation
         * types, the ModifyTable plan node must count the appropriate
         * events.)
         */
        if (operation == CMD_SELECT)
            (estate->es_processed)++;

        /*
         * check our tuple count.. if we've processed the proper number then
         * quit, else loop again and process more tuples.  Zero numberTuples
         * means no limit.
         */
        current_tuple_count++;
        if (numberTuples && numberTuples == current_tuple_count)
            break;
    }
}
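
/*
 * Illustrative note (not part of this file): the numberTuples argument is
 * what makes portal-style partial fetches work.  For example, a FETCH 10
 * from a cursor ultimately reaches this loop via something like
 * ExecutorRun(queryDesc, ForwardScanDirection, 10L): ten tuples are sent to
 * the destination, and the plan state is left positioned so that a later
 * fetch resumes where this one stopped.
 */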


/*
 * ExecRelCheck --- check that tuple meets constraints for result relation
 */
static const char *
ExecRelCheck(ResultRelInfo *resultRelInfo,
             TupleTableSlot *slot, EState *estate)
{
    Relation    rel = resultRelInfo->ri_RelationDesc;
    int         ncheck = rel->rd_att->constr->num_check;
    ConstrCheck *check = rel->rd_att->constr->check;
    ExprContext *econtext;
    MemoryContext oldContext;
    List       *qual;
    int         i;

    /*
     * If first time through for this result relation, build expression
     * nodetrees for rel's constraint expressions.  Keep them in the per-query
     * memory context so they'll survive throughout the query.
     */
    if (resultRelInfo->ri_ConstraintExprs == NULL)
    {
        oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
        resultRelInfo->ri_ConstraintExprs =
            (List **) palloc(ncheck * sizeof(List *));
        for (i = 0; i < ncheck; i++)
        {
            /* ExecQual wants implicit-AND form */
            qual = make_ands_implicit(stringToNode(check[i].ccbin));
            resultRelInfo->ri_ConstraintExprs[i] = (List *)
                ExecPrepareExpr((Expr *) qual, estate);
        }
        MemoryContextSwitchTo(oldContext);
    }

    /*
     * We will use the EState's per-tuple context for evaluating constraint
     * expressions (creating it if it's not already there).
     */
    econtext = GetPerTupleExprContext(estate);

    /* Arrange for econtext's scan tuple to be the tuple under test */
    econtext->ecxt_scantuple = slot;

    /* And evaluate the constraints */
    for (i = 0; i < ncheck; i++)
    {
        qual = resultRelInfo->ri_ConstraintExprs[i];

        /*
         * NOTE: SQL92 specifies that a NULL result from a constraint
         * expression is not to be treated as a failure.  Therefore, tell
         * ExecQual to return TRUE for NULL.
         */
        if (!ExecQual(qual, econtext, true))
            return check[i].ccname;
    }

    /* NULL result means no error */
    return NULL;
}

void
ExecConstraints(ResultRelInfo *resultRelInfo,
                TupleTableSlot *slot, EState *estate)
{
    Relation    rel = resultRelInfo->ri_RelationDesc;
    TupleConstr *constr = rel->rd_att->constr;

    Assert(constr);

    if (constr->has_not_null)
    {
        int         natts = rel->rd_att->natts;
        int         attrChk;

        for (attrChk = 1; attrChk <= natts; attrChk++)
        {
            if (rel->rd_att->attrs[attrChk - 1]->attnotnull &&
                slot_attisnull(slot, attrChk))
                ereport(ERROR,
                        (errcode(ERRCODE_NOT_NULL_VIOLATION),
                         errmsg("null value in column \"%s\" violates not-null constraint",
                        NameStr(rel->rd_att->attrs[attrChk - 1]->attname))));
        }
    }

    if (constr->num_check > 0)
    {
        const char *failed;

        if ((failed = ExecRelCheck(resultRelInfo, slot, estate)) != NULL)
            ereport(ERROR,
                    (errcode(ERRCODE_CHECK_VIOLATION),
                     errmsg("new row for relation \"%s\" violates check constraint \"%s\"",
                            RelationGetRelationName(rel), failed)));
    }
}
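
/*
 * Illustrative sketch (not part of this file): callers are expected to test
 * whether the relation has any constraints at all before calling
 * ExecConstraints; the INSERT/UPDATE paths in the ModifyTable code do
 * essentially this:
 *
 *    if (resultRelationDesc->rd_att->constr)
 *        ExecConstraints(resultRelInfo, slot, estate);
 */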
1335
1336
1337 /*
1338  * EvalPlanQual logic --- recheck modified tuple(s) to see if we want to
1339  * process the updated version under READ COMMITTED rules.
1340  *
1341  * See backend/executor/README for some info about how this works.
1342  */
1343
1344
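/*
 * Illustrative call sequence (a sketch only; nodeModifyTable.c and
 * nodeLockRows.c contain the real callers).  A node that may need EPQ
 * rechecking does roughly:
 *
 *		EvalPlanQualInit(&epqstate, estate, subplan, epqParam);
 *		...
 *		slot = EvalPlanQual(estate, &epqstate, rel, rti, &tid, xmax);
 *		if (slot != NULL)
 *			... redo the update/delete using the rechecked row ...
 *		...
 *		EvalPlanQualEnd(&epqstate);
 */
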
1345 /*
1346  * Check a modified tuple to see if we want to process its updated version
1347  * under READ COMMITTED rules.
1348  *
1349  *      estate - outer executor state data
1350  *      epqstate - state for EvalPlanQual rechecking
1351  *      relation - table containing tuple
1352  *      rti - rangetable index of table containing tuple
1353  *      *tid - t_ctid from the outdated tuple (ie, next updated version)
1354  *      priorXmax - t_xmax from the outdated tuple
1355  *
1356  * *tid is also an output parameter: it's modified to hold the TID of the
1357  * latest version of the tuple (note this may be changed even on failure)
1358  *
1359  * Returns a slot containing the new candidate update/delete tuple, or
1360  * NULL if we determine we shouldn't process the row.
1361  */
1362 TupleTableSlot *
1363 EvalPlanQual(EState *estate, EPQState *epqstate,
1364                          Relation relation, Index rti,
1365                          ItemPointer tid, TransactionId priorXmax)
1366 {
1367         TupleTableSlot *slot;
1368         HeapTuple       copyTuple;
1369
1370         Assert(rti > 0);
1371
1372         /*
1373          * Get and lock the updated version of the row; if fail, return NULL.
1374          */
1375         copyTuple = EvalPlanQualFetch(estate, relation, LockTupleExclusive,
1376                                                                   tid, priorXmax);
1377
1378         if (copyTuple == NULL)
1379                 return NULL;
1380
1381         /*
1382          * For UPDATE/DELETE we have to return the TID of the actual row we're
1383          * executing EPQ for.
1384          */
1385         *tid = copyTuple->t_self;
1386
1387         /*
1388          * Need to run a recheck subquery.  Initialize or reinitialize EPQ state.
1389          */
1390         EvalPlanQualBegin(epqstate, estate);
1391
1392         /*
1393          * Free old test tuple, if any, and store new tuple where relation's
1394          * scan node will see it
1395          */
1396         EvalPlanQualSetTuple(epqstate, rti, copyTuple);
1397
1398         /*
1399          * Fetch any non-locked source rows
1400          */
1401         EvalPlanQualFetchRowMarks(epqstate);
1402
1403         /*
1404          * Run the EPQ query.  We assume it will return at most one tuple.
1405          */
1406         slot = EvalPlanQualNext(epqstate);
1407
1408         /*
1409          * If we got a tuple, force the slot to materialize the tuple so that
1410          * it does not depend on any local state in the EPQ query (in
1411          * particular, the slot very likely contains references to pass-by-ref
1412          * datums in copyTuple).  As with the next step, this guards against
1413          * premature re-use of the EPQ query.
1414          */
1415         if (!TupIsNull(slot))
1416                 (void) ExecMaterializeSlot(slot);
1417
1418         /*
1419          * Clear out the test tuple.  This is needed in case the EPQ query
1420          * is re-used to test a tuple for a different relation.  (Not clear
1421          * that this can really happen, but let's be safe.)
1422          */
1423         EvalPlanQualSetTuple(epqstate, rti, NULL);
1424
1425         return slot;
1426 }
1427
1428 /*
1429  * Fetch a copy of the newest version of an outdated tuple
1430  *
1431  *      estate - executor state data
1432  *      relation - table containing tuple
1433  *      lockmode - requested tuple lock mode
1434  *      *tid - t_ctid from the outdated tuple (ie, next updated version)
1435  *      priorXmax - t_xmax from the outdated tuple
1436  *
1437  * Returns a palloc'd copy of the newest tuple version, or NULL if we find
1438  * that there is no newest version (ie, the row was deleted, not updated).
1439  * If successful, we have locked the newest tuple version, so caller does not
1440  * need to worry about it changing anymore.
1441  *
1442  * Note: properly, lockmode should be declared as enum LockTupleMode,
1443  * but we use "int" to avoid having to include heapam.h in executor.h.
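 *
 * Illustrative chain walk (a sketch): if version A of the row was
 * updated to B, and B to C, we arrive with *tid pointing at B and with
 * priorXmax = A's xmax (which should equal B's xmin).  Each iteration
 * verifies the fetched version's xmin against priorXmax (guarding
 * against TID recycling), then follows t_ctid/xmax forward until the
 * newest version is locked, or is found to have been deleted.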
1444  */
1445 HeapTuple
1446 EvalPlanQualFetch(EState *estate, Relation relation, int lockmode,
1447                                   ItemPointer tid, TransactionId priorXmax)
1448 {
1449         HeapTuple       copyTuple = NULL;
1450         HeapTupleData tuple;
1451         SnapshotData SnapshotDirty;
1452
1453         /*
1454          * fetch target tuple
1455          *
1456          * Loop here to deal with updated or busy tuples
1457          */
1458         InitDirtySnapshot(SnapshotDirty);
1459         tuple.t_self = *tid;
1460         for (;;)
1461         {
1462                 Buffer          buffer;
1463
1464                 if (heap_fetch(relation, &SnapshotDirty, &tuple, &buffer, true, NULL))
1465                 {
1466                         HTSU_Result test;
1467                         ItemPointerData update_ctid;
1468                         TransactionId update_xmax;
1469
1470                         /*
1471                          * If xmin isn't what we're expecting, the item slot must have been
1472                          * recycled and reused for an unrelated tuple.  This implies that
1473                          * the latest version of the row was deleted, so we need do
1474                          * nothing.  (Should be safe to examine xmin without getting
1475                          * buffer's content lock, since xmin never changes in an existing
1476                          * tuple.)
1477                          */
1478                         if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
1479                                                                          priorXmax))
1480                         {
1481                                 ReleaseBuffer(buffer);
1482                                 return NULL;
1483                         }
1484
1485                         /* otherwise xmin should not be dirty... */
1486                         if (TransactionIdIsValid(SnapshotDirty.xmin))
1487                                 elog(ERROR, "t_xmin is uncommitted in tuple to be updated");
1488
1489                         /*
1490                          * If the tuple is being updated by another transaction, we have
1491                          * to wait for its commit/abort.
1492                          */
1493                         if (TransactionIdIsValid(SnapshotDirty.xmax))
1494                         {
1495                                 ReleaseBuffer(buffer);
1496                                 XactLockTableWait(SnapshotDirty.xmax);
1497                                 continue;               /* loop back to repeat heap_fetch */
1498                         }
1499
1500                         /*
1501                          * If tuple was inserted by our own transaction, we have to check
1502                          * cmin against es_output_cid: cmin >= current CID means our
1503                          * command cannot see the tuple, so we should ignore it.  Without
1504                          * this we are open to the "Halloween problem" of indefinitely
1505                          * re-updating the same tuple. (We need not check cmax because
1506                          * HeapTupleSatisfiesDirty will consider a tuple deleted by our
1507                          * transaction dead, regardless of cmax.)  We just checked that
1508                          * priorXmax == xmin, so we can test that variable instead of
1509                          * doing HeapTupleHeaderGetXmin again.
1510                          */
1511                         if (TransactionIdIsCurrentTransactionId(priorXmax) &&
1512                                 HeapTupleHeaderGetCmin(tuple.t_data) >= estate->es_output_cid)
1513                         {
1514                                 ReleaseBuffer(buffer);
1515                                 return NULL;
1516                         }
1517
1518                         /*
1519                          * This is a live tuple, so now try to lock it.
1520                          */
1521                         test = heap_lock_tuple(relation, &tuple, &buffer,
1522                                                                    &update_ctid, &update_xmax,
1523                                                                    estate->es_output_cid,
1524                                                                    lockmode, false);
1525                         /* We now have two pins on the buffer, get rid of one */
1526                         ReleaseBuffer(buffer);
1527
1528                         switch (test)
1529                         {
1530                                 case HeapTupleSelfUpdated:
1531                                         /* treat it as deleted; do not process */
1532                                         ReleaseBuffer(buffer);
1533                                         return NULL;
1534
1535                                 case HeapTupleMayBeUpdated:
1536                                         /* successfully locked */
1537                                         break;
1538
1539                                 case HeapTupleUpdated:
1540                                         ReleaseBuffer(buffer);
1541                                         if (IsXactIsoLevelSerializable)
1542                                                 ereport(ERROR,
1543                                                                 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1544                                                                  errmsg("could not serialize access due to concurrent update")));
1545                                         if (!ItemPointerEquals(&update_ctid, &tuple.t_self))
1546                                         {
1547                                                 /* it was updated, so look at the updated version */
1548                                                 tuple.t_self = update_ctid;
                                                 /* updated row should have xmin matching this xmax */
                                                 priorXmax = update_xmax;
1549                                                 continue;
1550                                         }
1551                                         /* tuple was deleted, so give up */
1552                                         return NULL;
1553
1554                                 default:
1555                                         ReleaseBuffer(buffer);
1556                                         elog(ERROR, "unrecognized heap_lock_tuple status: %u",
1557                                                  test);
1558                                         return NULL;    /* keep compiler quiet */
1559                         }
1560
1561                         /*
1562                          * We got the tuple; now copy it for use by the recheck query.
1563                          */
1564                         copyTuple = heap_copytuple(&tuple);
1565                         ReleaseBuffer(buffer);
1566                         break;
1567                 }
1568
1569                 /*
1570                  * If the referenced item slot was actually empty, the latest version of
1571                  * the row must have been deleted, so we need do nothing.
1572                  */
1573                 if (tuple.t_data == NULL)
1574                 {
1575                         ReleaseBuffer(buffer);
1576                         return NULL;
1577                 }
1578
1579                 /*
1580                  * As above, if xmin isn't what we're expecting, do nothing.
1581                  */
1582                 if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
1583                                                                  priorXmax))
1584                 {
1585                         ReleaseBuffer(buffer);
1586                         return NULL;
1587                 }
1588
1589                 /*
1590                  * If we get here, the tuple was found but failed SnapshotDirty.
1591                  * Assuming the xmin is either a committed xact or our own xact (as it
1592                  * certainly should be if we're trying to modify the tuple), this must
1593                  * mean that the row was updated or deleted by either a committed xact
1594                  * or our own xact.  If it was deleted, we can ignore it; if it was
1595                  * updated then chain up to the next version and repeat the whole
1596                  * process.
1597                  *
1598                  * As above, it should be safe to examine xmax and t_ctid without the
1599                  * buffer content lock, because they can't be changing.
1600                  */
1601                 if (ItemPointerEquals(&tuple.t_self, &tuple.t_data->t_ctid))
1602                 {
1603                         /* deleted, so forget about it */
1604                         ReleaseBuffer(buffer);
1605                         return NULL;
1606                 }
1607
1608                 /* updated, so look at the updated row */
1609                 tuple.t_self = tuple.t_data->t_ctid;
1610                 /* updated row should have xmin matching this xmax */
1611                 priorXmax = HeapTupleHeaderGetXmax(tuple.t_data);
1612                 ReleaseBuffer(buffer);
1613                 /* loop back to fetch next in chain */
1614         }
1615
1616         /*
1617          * Return the copied tuple
1618          */
1619         return copyTuple;
1620 }
1621
1622 /*
1623  * EvalPlanQualInit -- initialize during creation of a plan state node
1624  * that might need to invoke EPQ processing.
1625  * Note: subplan can be NULL if it will be set later with EvalPlanQualSetPlan.
1626  */
1627 void
1628 EvalPlanQualInit(EPQState *epqstate, EState *estate,
1629                                  Plan *subplan, int epqParam)
1630 {
1631         /* Mark the EPQ state inactive */
1632         epqstate->estate = NULL;
1633         epqstate->planstate = NULL;
1634         epqstate->origslot = NULL;
1635         /* ... and remember data that EvalPlanQualBegin will need */
1636         epqstate->plan = subplan;
1637         epqstate->rowMarks = NIL;
1638         epqstate->epqParam = epqParam;
1639 }
1640
1641 /*
1642  * EvalPlanQualSetPlan -- set or change subplan of an EPQState.
1643  *
1644  * We need this so that ModifyTable can deal with multiple subplans.
1645  */
1646 void
1647 EvalPlanQualSetPlan(EPQState *epqstate, Plan *subplan)
1648 {
1649         /* If we have a live EPQ query, shut it down */
1650         EvalPlanQualEnd(epqstate);
1651         /* And set/change the plan pointer */
1652         epqstate->plan = subplan;
1653 }
1654
1655 /*
1656  * EvalPlanQualAddRowMark -- add an ExecRowMark that EPQ needs to handle.
1657  *
1658  * Currently, only non-locking RowMarks are supported.
1659  */
1660 void
1661 EvalPlanQualAddRowMark(EPQState *epqstate, ExecRowMark *erm)
1662 {
1663         if (RowMarkRequiresRowShareLock(erm->markType))
1664                 elog(ERROR, "EvalPlanQual doesn't support locking rowmarks");
1665         epqstate->rowMarks = lappend(epqstate->rowMarks, erm);
1666 }
1667
1668 /*
1669  * Install one test tuple into EPQ state, or clear test tuple if tuple == NULL
1670  *
1671  * NB: passed tuple must be palloc'd; it may get freed later
1672  */
1673 void
1674 EvalPlanQualSetTuple(EPQState *epqstate, Index rti, HeapTuple tuple)
1675 {
1676         EState     *estate = epqstate->estate;
1677
1678         Assert(rti > 0);
1679
1680         /*
1681          * free old test tuple, if any, and store new tuple where relation's
1682          * scan node will see it
1683          */
1684         if (estate->es_epqTuple[rti - 1] != NULL)
1685                 heap_freetuple(estate->es_epqTuple[rti - 1]);
1686         estate->es_epqTuple[rti - 1] = tuple;
1687         estate->es_epqTupleSet[rti - 1] = true;
1688 }
1689
1690 /*
1691  * Fetch back the current test tuple (if any) for the specified RTI
1692  */
1693 HeapTuple
1694 EvalPlanQualGetTuple(EPQState *epqstate, Index rti)
1695 {
1696         EState     *estate = epqstate->estate;
1697
1698         Assert(rti > 0);
1699
1700         return estate->es_epqTuple[rti - 1];
1701 }
1702
1703 /*
1704  * Fetch the current row values for any non-locked relations that need
1705  * to be scanned by an EvalPlanQual operation.  origslot must have been set
1706  * to contain the current result row (top-level row) that we need to recheck.
1707  */
1708 void
1709 EvalPlanQualFetchRowMarks(EPQState *epqstate)
1710 {
1711         ListCell   *l;
1712
1713         Assert(epqstate->origslot != NULL);
1714
1715         foreach(l, epqstate->rowMarks)
1716         {
1717                 ExecRowMark *erm = (ExecRowMark *) lfirst(l);
1718                 Datum           datum;
1719                 bool            isNull;
1720                 HeapTupleData tuple;
1721
1722                 /* clear any leftover test tuple for this rel */
1723                 EvalPlanQualSetTuple(epqstate, erm->rti, NULL);
1724
1725                 if (erm->relation)
1726                 {
1727                         Buffer          buffer;
1728
1729                         Assert(erm->markType == ROW_MARK_REFERENCE);
1730
1731                         /* if child rel, must check whether it produced this row */
1732                         if (erm->rti != erm->prti)
1733                         {
1734                                 Oid                     tableoid;
1735
1736                                 datum = ExecGetJunkAttribute(epqstate->origslot,
1737                                                                                          erm->toidAttNo,
1738                                                                                          &isNull);
1739                                 /* non-locked rels could be on the inside of outer joins */
1740                                 if (isNull)
1741                                         continue;
1742                                 tableoid = DatumGetObjectId(datum);
1743
1744                                 if (tableoid != RelationGetRelid(erm->relation))
1745                                 {
1746                                         /* this child is inactive right now */
1747                                         continue;
1748                                 }
1749                         }
1750
1751                         /* fetch the tuple's ctid */
1752                         datum = ExecGetJunkAttribute(epqstate->origslot,
1753                                                                                  erm->ctidAttNo,
1754                                                                                  &isNull);
1755                         /* non-locked rels could be on the inside of outer joins */
1756                         if (isNull)
1757                                 continue;
1758                         tuple.t_self = *((ItemPointer) DatumGetPointer(datum));
1759
1760                         /* okay, fetch the tuple */
1761                         if (!heap_fetch(erm->relation, SnapshotAny, &tuple, &buffer,
1762                                                         false, NULL))
1763                                 elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");
1764
1765                         /* successful, copy and store tuple */
1766                         EvalPlanQualSetTuple(epqstate, erm->rti,
1767                                                                  heap_copytuple(&tuple));
1768                         ReleaseBuffer(buffer);
1769                 }
1770                 else
1771                 {
1772                         HeapTupleHeader td;
1773
1774                         Assert(erm->markType == ROW_MARK_COPY);
1775
1776                         /* fetch the whole-row Var for the relation */
1777                         datum = ExecGetJunkAttribute(epqstate->origslot,
1778                                                                                  erm->wholeAttNo,
1779                                                                                  &isNull);
1780                         /* non-locked rels could be on the inside of outer joins */
1781                         if (isNull)
1782                                 continue;
1783                         td = DatumGetHeapTupleHeader(datum);
1784
1785                         /* build a temporary HeapTuple control structure */
1786                         tuple.t_len = HeapTupleHeaderGetDatumLength(td);
1787                         ItemPointerSetInvalid(&(tuple.t_self));
1788                         tuple.t_tableOid = InvalidOid;
1789                         tuple.t_data = td;
1790
1791                         /* copy and store tuple */
1792                         EvalPlanQualSetTuple(epqstate, erm->rti,
1793                                                                  heap_copytuple(&tuple));
1794                 }
1795         }
1796 }
1797
1798 /*
1799  * Fetch the next row (if any) from EvalPlanQual testing
1800  *
1801  * (In practice, there should never be more than one row...)
1802  */
1803 TupleTableSlot *
1804 EvalPlanQualNext(EPQState *epqstate)
1805 {
1806         MemoryContext oldcontext;
1807         TupleTableSlot *slot;
1808
1809         oldcontext = MemoryContextSwitchTo(epqstate->estate->es_query_cxt);
1810         slot = ExecProcNode(epqstate->planstate);
1811         MemoryContextSwitchTo(oldcontext);
1812
1813         return slot;
1814 }
1815
1816 /*
1817  * Initialize or reset an EvalPlanQual state tree
1818  */
1819 void
1820 EvalPlanQualBegin(EPQState *epqstate, EState *parentestate)
1821 {
1822         EState     *estate = epqstate->estate;
1823
1824         if (estate == NULL)
1825         {
1826                 /* First time through, so create a child EState */
1827                 EvalPlanQualStart(epqstate, parentestate, epqstate->plan);
1828         }
1829         else
1830         {
1831                 /*
1832                  * We already have a suitable child EPQ tree, so just reset it.
1833                  */
1834                 int                     rtsize = list_length(parentestate->es_range_table);
1835                 PlanState  *planstate = epqstate->planstate;
1836
1837                 MemSet(estate->es_epqScanDone, 0, rtsize * sizeof(bool));
1838
1839                 /* Recopy current values of parent parameters */
1840                 if (parentestate->es_plannedstmt->nParamExec > 0)
1841                 {
1842                         int             i = parentestate->es_plannedstmt->nParamExec;
1843
1844                         while (--i >= 0)
1845                         {
1846                                 /* copy value if any, but not execPlan link */
1847                                 estate->es_param_exec_vals[i].value =
1848                                         parentestate->es_param_exec_vals[i].value;
1849                                 estate->es_param_exec_vals[i].isnull =
1850                                         parentestate->es_param_exec_vals[i].isnull;
1851                         }
1852                 }
1853
1854                 /*
1855                  * Mark child plan tree as needing rescan at all scan nodes.  The
1856                  * first ExecProcNode will take care of actually doing the rescan.
1857                  */
1858                 planstate->chgParam = bms_add_member(planstate->chgParam,
1859                                                                                          epqstate->epqParam);
1860         }
1861 }
1862
1863 /*
1864  * Start execution of an EvalPlanQual plan tree.
1865  *
1866  * This is a cut-down version of ExecutorStart(): we copy some state from
1867  * the top-level estate rather than initializing it fresh.
1868  */
1869 static void
1870 EvalPlanQualStart(EPQState *epqstate, EState *parentestate, Plan *planTree)
1871 {
1872         EState     *estate;
1873         int                     rtsize;
1874         MemoryContext oldcontext;
1875         ListCell   *l;
1876
1877         rtsize = list_length(parentestate->es_range_table);
1878
1879         epqstate->estate = estate = CreateExecutorState();
1880
1881         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
1882
1883         /*
1884          * Child EPQ EStates share the parent's copy of unchanging state such as
1885          * the snapshot, rangetable, result-rel info, and external Param info.
1886          * They need their own copies of local state, including a tuple table,
1887          * es_param_exec_vals, etc.
1888          */
1889         estate->es_direction = ForwardScanDirection;
1890         estate->es_snapshot = parentestate->es_snapshot;
1891         estate->es_crosscheck_snapshot = parentestate->es_crosscheck_snapshot;
1892         estate->es_range_table = parentestate->es_range_table;
1893         estate->es_plannedstmt = parentestate->es_plannedstmt;
1894         estate->es_junkFilter = parentestate->es_junkFilter;
1895         estate->es_output_cid = parentestate->es_output_cid;
1896         estate->es_result_relations = parentestate->es_result_relations;
1897         estate->es_num_result_relations = parentestate->es_num_result_relations;
1898         estate->es_result_relation_info = parentestate->es_result_relation_info;
1899         /* es_trig_target_relations must NOT be copied */
1900         estate->es_rowMarks = parentestate->es_rowMarks;
1901         estate->es_instrument = parentestate->es_instrument;
1902         estate->es_select_into = parentestate->es_select_into;
1903         estate->es_into_oids = parentestate->es_into_oids;
1904
1905         /*
1906          * The external param list is simply shared from parent.  The internal
1907          * param workspace has to be local state, but we copy the initial values
1908          * from the parent, so as to have access to any param values that were
1909          * already set from other parts of the parent's plan tree.
1910          */
1911         estate->es_param_list_info = parentestate->es_param_list_info;
1912         if (parentestate->es_plannedstmt->nParamExec > 0)
1913         {
1914                 int             i = parentestate->es_plannedstmt->nParamExec;
1915
1916                 estate->es_param_exec_vals = (ParamExecData *)
1917                         palloc0(i * sizeof(ParamExecData));
1918                 while (--i >= 0)
1919                 {
1920                         /* copy value if any, but not execPlan link */
1921                         estate->es_param_exec_vals[i].value =
1922                                 parentestate->es_param_exec_vals[i].value;
1923                         estate->es_param_exec_vals[i].isnull =
1924                                 parentestate->es_param_exec_vals[i].isnull;
1925                 }
1926         }
1927
1928         /*
1929          * Each EState must have its own es_epqScanDone state, but if we have
1930          * nested EPQ checks they should share es_epqTuple arrays.  This allows
1931          * sub-rechecks to inherit the values being examined by an outer recheck.
1932          */
1933         estate->es_epqScanDone = (bool *) palloc0(rtsize * sizeof(bool));
1934         if (parentestate->es_epqTuple != NULL)
1935         {
1936                 estate->es_epqTuple = parentestate->es_epqTuple;
1937                 estate->es_epqTupleSet = parentestate->es_epqTupleSet;
1938         }
1939         else
1940         {
1941                 estate->es_epqTuple = (HeapTuple *)
1942                         palloc0(rtsize * sizeof(HeapTuple));
1943                 estate->es_epqTupleSet = (bool *)
1944                         palloc0(rtsize * sizeof(bool));
1945         }
1946
1947         /*
1948          * Each estate also has its own tuple table.
1949          */
1950         estate->es_tupleTable = NIL;
1951
1952         /*
1953          * Initialize private state information for each SubPlan.  We must do this
1954          * before running ExecInitNode on the main query tree, since
1955          * ExecInitSubPlan expects to be able to find these entries.
1956          * Some of the SubPlans might not be used in the part of the plan tree
1957          * we intend to run, but since it's not easy to tell which, we just
1958          * initialize them all.
1959          */
1960         Assert(estate->es_subplanstates == NIL);
1961         foreach(l, parentestate->es_plannedstmt->subplans)
1962         {
1963                 Plan       *subplan = (Plan *) lfirst(l);
1964                 PlanState  *subplanstate;
1965
1966                 subplanstate = ExecInitNode(subplan, estate, 0);
1967
1968                 estate->es_subplanstates = lappend(estate->es_subplanstates,
1969                                                                                    subplanstate);
1970         }
1971
1972         /*
1973          * Initialize the private state information for all the nodes in the
1974          * part of the plan tree we need to run.  This opens files, allocates
1975          * storage and leaves us ready to start processing tuples.
1976          */
1977         epqstate->planstate = ExecInitNode(planTree, estate, 0);
1978
1979         MemoryContextSwitchTo(oldcontext);
1980 }
1981
1982 /*
1983  * EvalPlanQualEnd -- shut down at termination of parent plan state node,
1984  * or if we are done with the current EPQ child.
1985  *
1986  * This is a cut-down version of ExecutorEnd(); basically we want to do most
1987  * of the normal cleanup, but *not* close result relations (which we are
1988  * just sharing from the outer query).  We do, however, have to close any
1989  * trigger target relations that got opened, since those are not shared.
1990  * (There probably shouldn't be any of the latter, but just in case...)
1991  */
1992 void
1993 EvalPlanQualEnd(EPQState *epqstate)
1994 {
1995         EState     *estate = epqstate->estate;
1996         MemoryContext oldcontext;
1997         ListCell   *l;
1998
1999         if (estate == NULL)
2000                 return;                                 /* idle, so nothing to do */
2001
2002         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
2003
2004         ExecEndNode(epqstate->planstate);
2005
2006         foreach(l, estate->es_subplanstates)
2007         {
2008                 PlanState  *subplanstate = (PlanState *) lfirst(l);
2009
2010                 ExecEndNode(subplanstate);
2011         }
2012
2013         /* throw away the per-estate tuple table */
2014         ExecResetTupleTable(estate->es_tupleTable, false);
2015
2016         /* close any trigger target relations attached to this EState */
2017         foreach(l, estate->es_trig_target_relations)
2018         {
2019                 ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l);
2020
2021                 /* Close indices and then the relation itself */
2022                 ExecCloseIndices(resultRelInfo);
2023                 heap_close(resultRelInfo->ri_RelationDesc, NoLock);
2024         }
2025
2026         MemoryContextSwitchTo(oldcontext);
2027
2028         FreeExecutorState(estate);
2029
2030         /* Mark EPQState idle */
2031         epqstate->estate = NULL;
2032         epqstate->planstate = NULL;
2033         epqstate->origslot = NULL;
2034 }
2035
2036
2037 /*
2038  * Support for SELECT INTO (a/k/a CREATE TABLE AS)
2039  *
2040  * We implement SELECT INTO by diverting SELECT's normal output with
2041  * a specialized DestReceiver type.
2042  */
2043
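/*
 * Rough control flow (orientation only): for a query with an INTO
 * clause, InitPlan() finishes by calling OpenIntoRel(), which creates
 * the target table and installs the DR_intorel receiver below as
 * queryDesc->dest; each result tuple is then delivered to
 * intorel_receive(), and ExecutorEnd() reaches CloseIntoRel() to sync
 * and close the new relation.
 */
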
2044 typedef struct
2045 {
2046         DestReceiver pub;                       /* publicly-known function pointers */
2047         EState     *estate;                     /* EState we are working with */
2048         Relation        rel;                    /* Relation to write to */
2049         int                     hi_options;             /* heap_insert performance options */
2050         BulkInsertState bistate;        /* bulk insert state */
2051 } DR_intorel;
2052
2053 /*
2054  * OpenIntoRel --- actually create the SELECT INTO target relation
2055  *
2056  * This also replaces QueryDesc->dest with the special DestReceiver for
2057  * SELECT INTO.  We assume that the correct result tuple type has already
2058  * been placed in queryDesc->tupDesc.
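 *
 * For example (illustrative): for "SELECT * INTO newtab FROM oldtab",
 * this routine creates newtab and installs the DestReceiver that will
 * capture each row the SELECT produces.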
2059  */
2060 static void
2061 OpenIntoRel(QueryDesc *queryDesc)
2062 {
2063         IntoClause *into = queryDesc->plannedstmt->intoClause;
2064         EState     *estate = queryDesc->estate;
2065         Relation        intoRelationDesc;
2066         char       *intoName;
2067         Oid                     namespaceId;
2068         Oid                     tablespaceId;
2069         Datum           reloptions;
2070         AclResult       aclresult;
2071         Oid                     intoRelationId;
2072         TupleDesc       tupdesc;
2073         DR_intorel *myState;
2074         static char *validnsps[] = HEAP_RELOPT_NAMESPACES;
2075
2076         Assert(into);
2077
2078         /*
2079          * XXX This code needs to be kept in sync with DefineRelation().
2080          * Maybe we should try to use that function instead.
2081          */
2082
2083         /*
2084          * Check consistency of arguments
2085          */
2086         if (into->onCommit != ONCOMMIT_NOOP && !into->rel->istemp)
2087                 ereport(ERROR,
2088                                 (errcode(ERRCODE_INVALID_TABLE_DEFINITION),
2089                                  errmsg("ON COMMIT can only be used on temporary tables")));
2090
2091         /*
2092          * Security check: disallow creating temp tables from security-restricted
2093          * code.  This is needed because calling code might not expect untrusted
2094          * tables to appear in pg_temp at the front of its search path.
2095          */
2096         if (into->rel->istemp && InSecurityRestrictedOperation())
2097                 ereport(ERROR,
2098                                 (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
2099                                  errmsg("cannot create temporary table within security-restricted operation")));
2100
2101         /*
2102          * Find namespace to create in, check its permissions
2103          */
2104         intoName = into->rel->relname;
2105         namespaceId = RangeVarGetCreationNamespace(into->rel);
2106
2107         aclresult = pg_namespace_aclcheck(namespaceId, GetUserId(),
2108                                                                           ACL_CREATE);
2109         if (aclresult != ACLCHECK_OK)
2110                 aclcheck_error(aclresult, ACL_KIND_NAMESPACE,
2111                                            get_namespace_name(namespaceId));
2112
2113         /*
2114          * Select tablespace to use.  If not specified, use default tablespace
2115          * (which may in turn default to database's default).
2116          */
2117         if (into->tableSpaceName)
2118         {
2119                 tablespaceId = get_tablespace_oid(into->tableSpaceName);
2120                 if (!OidIsValid(tablespaceId))
2121                         ereport(ERROR,
2122                                         (errcode(ERRCODE_UNDEFINED_OBJECT),
2123                                          errmsg("tablespace \"%s\" does not exist",
2124                                                         into->tableSpaceName)));
2125         }
2126         else
2127         {
2128                 tablespaceId = GetDefaultTablespace(into->rel->istemp);
2129                 /* note InvalidOid is OK in this case */
2130         }
2131
2132         /* Check permissions except when using the database's default space */
2133         if (OidIsValid(tablespaceId) && tablespaceId != MyDatabaseTableSpace)
2134         {
2135                 AclResult       aclresult;
2136
2137                 aclresult = pg_tablespace_aclcheck(tablespaceId, GetUserId(),
2138                                                                                    ACL_CREATE);
2139
2140                 if (aclresult != ACLCHECK_OK)
2141                         aclcheck_error(aclresult, ACL_KIND_TABLESPACE,
2142                                                    get_tablespace_name(tablespaceId));
2143         }
2144
2145         /* Parse and validate any reloptions */
2146         reloptions = transformRelOptions((Datum) 0,
2147                                                                          into->options,
2148                                                                          NULL,
2149                                                                          validnsps,
2150                                                                          true,
2151                                                                          false);
2152         (void) heap_reloptions(RELKIND_RELATION, reloptions, true);
2153
2154         /* Copy the tupdesc because heap_create_with_catalog modifies it */
2155         tupdesc = CreateTupleDescCopy(queryDesc->tupDesc);
2156
2157         /* Now we can actually create the new relation */
2158         intoRelationId = heap_create_with_catalog(intoName,
2159                                                                                           namespaceId,
2160                                                                                           tablespaceId,
2161                                                                                           InvalidOid,
2162                                                                                           InvalidOid,
2163                                                                                           GetUserId(),
2164                                                                                           tupdesc,
2165                                                                                           NIL,
2166                                                                                           RELKIND_RELATION,
2167                                                                                           false,
2168                                                                                           true,
2169                                                                                           0,
2170                                                                                           into->onCommit,
2171                                                                                           reloptions,
2172                                                                                           true,
2173                                                                                           allowSystemTableMods);
2174
2175         FreeTupleDesc(tupdesc);
2176
2177         /*
2178          * Advance command counter so that the newly-created relation's catalog
2179          * tuples will be visible to heap_open.
2180          */
2181         CommandCounterIncrement();
2182
2183         /*
2184          * If necessary, create a TOAST table for the INTO relation. Note that
2185          * AlterTableCreateToastTable ends with CommandCounterIncrement(), so that
2186          * the TOAST table will be visible for insertion.
2187          */
2188         reloptions = transformRelOptions((Datum) 0,
2189                                                                          into->options,
2190                                                                          "toast",
2191                                                                          validnsps,
2192                                                                          true,
2193                                                                          false);
2194
2195         (void) heap_reloptions(RELKIND_TOASTVALUE, reloptions, true);
2196
2197         AlterTableCreateToastTable(intoRelationId, reloptions);
2198
2199         /*
2200          * And open the constructed table for writing.
2201          */
2202         intoRelationDesc = heap_open(intoRelationId, AccessExclusiveLock);
2203
2204         /*
2205          * Now replace the query's DestReceiver with one for SELECT INTO
2206          */
2207         queryDesc->dest = CreateDestReceiver(DestIntoRel);
2208         myState = (DR_intorel *) queryDesc->dest;
2209         Assert(myState->pub.mydest == DestIntoRel);
2210         myState->estate = estate;
2211         myState->rel = intoRelationDesc;
2212
2213         /*
2214          * We can skip WAL-logging the insertions, unless PITR is in use.  We can
2215          * skip the FSM in any case.
2216          */
2217         myState->hi_options = HEAP_INSERT_SKIP_FSM |
2218                 (XLogArchivingActive() ? 0 : HEAP_INSERT_SKIP_WAL);
2219         myState->bistate = GetBulkInsertState();
2220
2221         /* Not using WAL requires that rd_targblock be initially invalid */
2222         Assert(intoRelationDesc->rd_targblock == InvalidBlockNumber);
2223 }
2224
2225 /*
2226  * CloseIntoRel --- clean up SELECT INTO at ExecutorEnd time
2227  */
2228 static void
2229 CloseIntoRel(QueryDesc *queryDesc)
2230 {
2231         DR_intorel *myState = (DR_intorel *) queryDesc->dest;
2232
2233         /* OpenIntoRel might never have gotten called */
2234         if (myState && myState->pub.mydest == DestIntoRel && myState->rel)
2235         {
2236                 FreeBulkInsertState(myState->bistate);
2237
2238                 /* If we skipped using WAL, must heap_sync before commit */
2239                 if (myState->hi_options & HEAP_INSERT_SKIP_WAL)
2240                         heap_sync(myState->rel);
2241
2242                 /* close rel, but keep lock until commit */
2243                 heap_close(myState->rel, NoLock);
2244
2245                 myState->rel = NULL;
2246         }
2247 }
2248
2249 /*
2250  * CreateIntoRelDestReceiver -- create a suitable DestReceiver object
2251  */
2252 DestReceiver *
2253 CreateIntoRelDestReceiver(void)
2254 {
2255         DR_intorel *self = (DR_intorel *) palloc0(sizeof(DR_intorel));
2256
2257         self->pub.receiveSlot = intorel_receive;
2258         self->pub.rStartup = intorel_startup;
2259         self->pub.rShutdown = intorel_shutdown;
2260         self->pub.rDestroy = intorel_destroy;
2261         self->pub.mydest = DestIntoRel;
2262
2263         /* private fields will be set by OpenIntoRel */
2264
2265         return (DestReceiver *) self;
2266 }
2267
2268 /*
2269  * intorel_startup --- executor startup
2270  */
2271 static void
2272 intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
2273 {
2274         /* no-op */
2275 }
2276
2277 /*
2278  * intorel_receive --- receive one tuple
2279  */
2280 static void
2281 intorel_receive(TupleTableSlot *slot, DestReceiver *self)
2282 {
2283         DR_intorel *myState = (DR_intorel *) self;
2284         HeapTuple       tuple;
2285
2286         /*
2287          * get the heap tuple out of the tuple table slot, making sure we have a
2288          * writable copy
2289          */
2290         tuple = ExecMaterializeSlot(slot);
2291
2292         /*
2293          * force assignment of new OID (see comments in ExecInsert)
2294          */
2295         if (myState->rel->rd_rel->relhasoids)
2296                 HeapTupleSetOid(tuple, InvalidOid);
2297
2298         heap_insert(myState->rel,
2299                                 tuple,
2300                                 myState->estate->es_output_cid,
2301                                 myState->hi_options,
2302                                 myState->bistate);
2303
2304         /* We know this is a newly created relation, so there are no indexes */
2305 }
2306
2307 /*
2308  * intorel_shutdown --- executor end
2309  */
2310 static void
2311 intorel_shutdown(DestReceiver *self)
2312 {
2313         /* no-op */
2314 }
2315
2316 /*
2317  * intorel_destroy --- release DestReceiver object
2318  */
2319 static void
2320 intorel_destroy(DestReceiver *self)
2321 {
2322         pfree(self);
2323 }