/*-------------------------------------------------------------------------
 *
 * execMain.c
 *    top level executor interface routines
 *
 * INTERFACE ROUTINES
 *    ExecutorStart()
 *    ExecutorRun()
 *    ExecutorEnd()
 *
 *    The old ExecutorMain() has been replaced by ExecutorStart(),
 *    ExecutorRun() and ExecutorEnd()
 *
 *    These three procedures are the external interfaces to the executor.
 *    In each case, the query descriptor is required as an argument.
 *
 *    ExecutorStart() must be called at the beginning of execution of any
 *    query plan and ExecutorEnd() should always be called at the end of
 *    execution of a plan.
 *
 *    ExecutorRun accepts direction and count arguments that specify whether
 *    the plan is to be executed forwards or backwards, and for how many
 *    tuples.
 *
 * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *    $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.341 2010/01/08 02:44:00 tgl Exp $
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/reloptions.h"
#include "access/sysattr.h"
#include "access/transam.h"
#include "access/xact.h"
#include "catalog/heap.h"
#include "catalog/namespace.h"
#include "catalog/toasting.h"
#include "commands/tablespace.h"
#include "commands/trigger.h"
#include "executor/execdebug.h"
#include "executor/instrument.h"
#include "miscadmin.h"
#include "optimizer/clauses.h"
#include "parser/parse_clause.h"
#include "parser/parsetree.h"
#include "storage/bufmgr.h"
#include "storage/lmgr.h"
#include "utils/acl.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/snapmgr.h"
#include "utils/tqual.h"


/* Hooks for plugins to get control in ExecutorStart/Run/End() */
ExecutorStart_hook_type ExecutorStart_hook = NULL;
ExecutorRun_hook_type ExecutorRun_hook = NULL;
ExecutorEnd_hook_type ExecutorEnd_hook = NULL;

/* decls for local routines only used within this module */
static void InitPlan(QueryDesc *queryDesc, int eflags);
static void ExecEndPlan(PlanState *planstate, EState *estate);
static void ExecutePlan(EState *estate, PlanState *planstate,
            CmdType operation,
            bool sendTuples,
            long numberTuples,
            ScanDirection direction,
            DestReceiver *dest);
static void ExecCheckRTPerms(List *rangeTable);
static void ExecCheckRTEPerms(RangeTblEntry *rte);
static void ExecCheckXactReadOnly(PlannedStmt *plannedstmt);
static void EvalPlanQualStart(EPQState *epqstate, EState *parentestate,
                              Plan *planTree);
static void OpenIntoRel(QueryDesc *queryDesc);
static void CloseIntoRel(QueryDesc *queryDesc);
static void intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo);
static void intorel_receive(TupleTableSlot *slot, DestReceiver *self);
static void intorel_shutdown(DestReceiver *self);
static void intorel_destroy(DestReceiver *self);

/* end of local decls */


/* ----------------------------------------------------------------
 *        ExecutorStart
 *
 *        This routine must be called at the beginning of any execution of any
 *        query plan
 *
 * Takes a QueryDesc previously created by CreateQueryDesc (it's not really
 * clear why we bother to separate the two functions, but...).  The tupDesc
 * field of the QueryDesc is filled in to describe the tuples that will be
 * returned, and the internal fields (estate and planstate) are set up.
 *
 * eflags contains flag bits as described in executor.h.
 *
 * NB: the CurrentMemoryContext when this is called will become the parent
 * of the per-query context used for this Executor invocation.
 *
 * We provide a function hook variable that lets loadable plugins
 * get control when ExecutorStart is called.  Such a plugin would
 * normally call standard_ExecutorStart().
 *
 * ----------------------------------------------------------------
 */
void
ExecutorStart(QueryDesc *queryDesc, int eflags)
{
    if (ExecutorStart_hook)
        (*ExecutorStart_hook) (queryDesc, eflags);
    else
        standard_ExecutorStart(queryDesc, eflags);
}
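
/*
 * A loadable module would typically install this hook from its _PG_init()
 * and chain to any previously installed hook.  The following is a sketch
 * only, in the style used by contrib modules such as pg_stat_statements;
 * the names prev_ExecutorStart and my_ExecutorStart are illustrative and
 * not part of this file:
 *
 *    static ExecutorStart_hook_type prev_ExecutorStart = NULL;
 *
 *    static void
 *    my_ExecutorStart(QueryDesc *queryDesc, int eflags)
 *    {
 *        ... plugin-specific setup here ...
 *        if (prev_ExecutorStart)
 *            prev_ExecutorStart(queryDesc, eflags);
 *        else
 *            standard_ExecutorStart(queryDesc, eflags);
 *    }
 *
 *    void
 *    _PG_init(void)
 *    {
 *        prev_ExecutorStart = ExecutorStart_hook;
 *        ExecutorStart_hook = my_ExecutorStart;
 *    }
 */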

void
standard_ExecutorStart(QueryDesc *queryDesc, int eflags)
{
    EState     *estate;
    MemoryContext oldcontext;

    /* sanity checks: queryDesc must not be started already */
    Assert(queryDesc != NULL);
    Assert(queryDesc->estate == NULL);

    /*
     * If the transaction is read-only, we need to check if any writes are
     * planned to non-temporary tables.  EXPLAIN is considered read-only.
     */
    if (XactReadOnly && !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
        ExecCheckXactReadOnly(queryDesc->plannedstmt);

    /*
     * Build EState, switch into per-query memory context for startup.
     */
    estate = CreateExecutorState();
    queryDesc->estate = estate;

    oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

    /*
     * Fill in external parameters, if any, from queryDesc; and allocate
     * workspace for internal parameters
     */
    estate->es_param_list_info = queryDesc->params;

    if (queryDesc->plannedstmt->nParamExec > 0)
        estate->es_param_exec_vals = (ParamExecData *)
            palloc0(queryDesc->plannedstmt->nParamExec * sizeof(ParamExecData));

    /*
     * If non-read-only query, set the command ID to mark output tuples with
     */
    switch (queryDesc->operation)
    {
        case CMD_SELECT:
            /* SELECT INTO and SELECT FOR UPDATE/SHARE need to mark tuples */
            if (queryDesc->plannedstmt->intoClause != NULL ||
                queryDesc->plannedstmt->rowMarks != NIL)
                estate->es_output_cid = GetCurrentCommandId(true);
            break;

        case CMD_INSERT:
        case CMD_DELETE:
        case CMD_UPDATE:
            estate->es_output_cid = GetCurrentCommandId(true);
            break;

        default:
            elog(ERROR, "unrecognized operation code: %d",
                 (int) queryDesc->operation);
            break;
    }

    /*
     * Copy other important information into the EState
     */
    estate->es_snapshot = RegisterSnapshot(queryDesc->snapshot);
    estate->es_crosscheck_snapshot = RegisterSnapshot(queryDesc->crosscheck_snapshot);
    estate->es_instrument = queryDesc->instrument_options;

    /*
     * Initialize the plan state tree
     */
    InitPlan(queryDesc, eflags);

    MemoryContextSwitchTo(oldcontext);
}

/* ----------------------------------------------------------------
 *        ExecutorRun
 *
 *        This is the main routine of the executor module. It accepts
 *        the query descriptor from the traffic cop and executes the
 *        query plan.
 *
 *        ExecutorStart must have been called already.
 *
 *        If direction is NoMovementScanDirection then nothing is done
 *        except to start up/shut down the destination.  Otherwise,
 *        we retrieve up to 'count' tuples in the specified direction.
 *
 *        Note: count = 0 is interpreted as no portal limit, i.e., run to
 *        completion.
 *
 *        There is no return value, but output tuples (if any) are sent to
 *        the destination receiver specified in the QueryDesc; and the number
 *        of tuples processed at the top level can be found in
 *        estate->es_processed.
 *
 *        We provide a function hook variable that lets loadable plugins
 *        get control when ExecutorRun is called.  Such a plugin would
 *        normally call standard_ExecutorRun().
 *
 * ----------------------------------------------------------------
 */
void
ExecutorRun(QueryDesc *queryDesc,
            ScanDirection direction, long count)
{
    if (ExecutorRun_hook)
        (*ExecutorRun_hook) (queryDesc, direction, count);
    else
        standard_ExecutorRun(queryDesc, direction, count);
}

void
standard_ExecutorRun(QueryDesc *queryDesc,
                     ScanDirection direction, long count)
{
    EState     *estate;
    CmdType     operation;
    DestReceiver *dest;
    bool        sendTuples;
    MemoryContext oldcontext;

    /* sanity checks */
    Assert(queryDesc != NULL);

    estate = queryDesc->estate;

    Assert(estate != NULL);

    /*
     * Switch into per-query memory context
     */
    oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

    /* Allow instrumentation of ExecutorRun overall runtime */
    if (queryDesc->totaltime)
        InstrStartNode(queryDesc->totaltime);

    /*
     * extract information from the query descriptor
     */
    operation = queryDesc->operation;
    dest = queryDesc->dest;

    /*
     * startup tuple receiver, if we will be emitting tuples
     */
    estate->es_processed = 0;
    estate->es_lastoid = InvalidOid;

    sendTuples = (operation == CMD_SELECT ||
                  queryDesc->plannedstmt->hasReturning);

    if (sendTuples)
        (*dest->rStartup) (dest, operation, queryDesc->tupDesc);

    /*
     * run plan
     */
    if (!ScanDirectionIsNoMovement(direction))
        ExecutePlan(estate,
                    queryDesc->planstate,
                    operation,
                    sendTuples,
                    count,
                    direction,
                    dest);

    /*
     * shutdown tuple receiver, if we started it
     */
    if (sendTuples)
        (*dest->rShutdown) (dest);

    if (queryDesc->totaltime)
        InstrStopNode(queryDesc->totaltime, estate->es_processed);

    MemoryContextSwitchTo(oldcontext);
}

/* ----------------------------------------------------------------
 *        ExecutorEnd
 *
 *        This routine must be called at the end of execution of any
 *        query plan
 *
 *        We provide a function hook variable that lets loadable plugins
 *        get control when ExecutorEnd is called.  Such a plugin would
 *        normally call standard_ExecutorEnd().
 *
 * ----------------------------------------------------------------
 */
void
ExecutorEnd(QueryDesc *queryDesc)
{
    if (ExecutorEnd_hook)
        (*ExecutorEnd_hook) (queryDesc);
    else
        standard_ExecutorEnd(queryDesc);
}

void
standard_ExecutorEnd(QueryDesc *queryDesc)
{
    EState     *estate;
    MemoryContext oldcontext;

    /* sanity checks */
    Assert(queryDesc != NULL);

    estate = queryDesc->estate;

    Assert(estate != NULL);

    /*
     * Switch into per-query memory context to run ExecEndPlan
     */
    oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

    ExecEndPlan(queryDesc->planstate, estate);

    /*
     * Close the SELECT INTO relation if any
     */
    if (estate->es_select_into)
        CloseIntoRel(queryDesc);

    /* do away with our snapshots */
    UnregisterSnapshot(estate->es_snapshot);
    UnregisterSnapshot(estate->es_crosscheck_snapshot);

    /*
     * Must switch out of context before destroying it
     */
    MemoryContextSwitchTo(oldcontext);

    /*
     * Release EState and per-query memory context.  This should release
     * everything the executor has allocated.
     */
    FreeExecutorState(estate);

    /* Reset queryDesc fields that no longer point to anything */
    queryDesc->tupDesc = NULL;
    queryDesc->estate = NULL;
    queryDesc->planstate = NULL;
    queryDesc->totaltime = NULL;
}
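
/*
 * For reference, a typical top-level caller (a sketch; the real driver is
 * the portal code in pquery.c) uses these entry points like so, given a
 * QueryDesc built with CreateQueryDesc:
 *
 *    ExecutorStart(queryDesc, 0);
 *    ExecutorRun(queryDesc, ForwardScanDirection, 0L);  (0 = no limit)
 *    ExecutorEnd(queryDesc);
 *    FreeQueryDesc(queryDesc);
 *
 * Output tuples, if any, go to the DestReceiver stored in the QueryDesc.
 */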

/* ----------------------------------------------------------------
 *        ExecutorRewind
 *
 *        This routine may be called on an open queryDesc to rewind it
 *        to the start.
 * ----------------------------------------------------------------
 */
void
ExecutorRewind(QueryDesc *queryDesc)
{
    EState     *estate;
    MemoryContext oldcontext;

    /* sanity checks */
    Assert(queryDesc != NULL);

    estate = queryDesc->estate;

    Assert(estate != NULL);

    /* It's probably not sensible to rescan updating queries */
    Assert(queryDesc->operation == CMD_SELECT);

    /*
     * Switch into per-query memory context
     */
    oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

    /*
     * rescan plan
     */
    ExecReScan(queryDesc->planstate, NULL);

    MemoryContextSwitchTo(oldcontext);
}
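
/*
 * ExecutorRewind is reached, for example, when the portal code rewinds a
 * scrollable cursor back to its start (see DoPortalRewind in pquery.c);
 * the effect is simply an ExecReScan of the whole plan tree, after which
 * ExecutorRun can be called again from the top.  (A sketch of the intended
 * usage, not a statement about every caller.)
 */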


/*
 * ExecCheckRTPerms
 *        Check access permissions for all relations listed in a range table.
 */
static void
ExecCheckRTPerms(List *rangeTable)
{
    ListCell   *l;

    foreach(l, rangeTable)
    {
        ExecCheckRTEPerms((RangeTblEntry *) lfirst(l));
    }
}

/*
 * ExecCheckRTEPerms
 *        Check access permissions for a single RTE.
 */
static void
ExecCheckRTEPerms(RangeTblEntry *rte)
{
    AclMode     requiredPerms;
    AclMode     relPerms;
    AclMode     remainingPerms;
    Oid         relOid;
    Oid         userid;
    Bitmapset  *tmpset;
    int         col;

    /*
     * Only plain-relation RTEs need to be checked here.  Function RTEs are
     * checked by init_fcache when the function is prepared for execution.
     * Join, subquery, and special RTEs need no checks.
     */
    if (rte->rtekind != RTE_RELATION)
        return;

    /*
     * No work if requiredPerms is empty.
     */
    requiredPerms = rte->requiredPerms;
    if (requiredPerms == 0)
        return;

    relOid = rte->relid;

    /*
     * userid to check as: current user unless we have a setuid indication.
     *
     * Note: GetUserId() is presently fast enough that there's no harm in
     * calling it separately for each RTE.  If that stops being true, we could
     * call it once in ExecCheckRTPerms and pass the userid down from there.
     * But for now, no need for the extra clutter.
     */
    userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();

    /*
     * We must have *all* the requiredPerms bits, but some of the bits can be
     * satisfied from column-level rather than relation-level permissions.
     * First, remove any bits that are satisfied by relation permissions.
     */
    relPerms = pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL);
    remainingPerms = requiredPerms & ~relPerms;
    if (remainingPerms != 0)
    {
        /*
         * If we lack any permissions that exist only as relation permissions,
         * we can fail straight away.
         */
        if (remainingPerms & ~(ACL_SELECT | ACL_INSERT | ACL_UPDATE))
            aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
                           get_rel_name(relOid));

        /*
         * Check to see if we have the needed privileges at column level.
         *
         * Note: failures just report a table-level error; it would be nicer
         * to report a column-level error if we have some but not all of the
         * column privileges.
         */
        if (remainingPerms & ACL_SELECT)
        {
            /*
             * When the query doesn't explicitly reference any columns (for
             * example, SELECT COUNT(*) FROM table), allow the query if we
             * have SELECT on any column of the rel, as per SQL spec.
             */
            if (bms_is_empty(rte->selectedCols))
            {
                if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
                                              ACLMASK_ANY) != ACLCHECK_OK)
                    aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
                                   get_rel_name(relOid));
            }

            tmpset = bms_copy(rte->selectedCols);
            while ((col = bms_first_member(tmpset)) >= 0)
            {
                /* remove the column number offset */
                col += FirstLowInvalidHeapAttributeNumber;
                if (col == InvalidAttrNumber)
                {
                    /* Whole-row reference, must have priv on all cols */
                    if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
                                                  ACLMASK_ALL) != ACLCHECK_OK)
                        aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
                                       get_rel_name(relOid));
                }
                else
                {
                    if (pg_attribute_aclcheck(relOid, col, userid, ACL_SELECT)
                        != ACLCHECK_OK)
                        aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
                                       get_rel_name(relOid));
                }
            }
            bms_free(tmpset);
        }

        /*
         * Basically the same for the mod columns, with either INSERT or
         * UPDATE privilege as specified by remainingPerms.
         */
        remainingPerms &= ~ACL_SELECT;
        if (remainingPerms != 0)
        {
            /*
             * When the query doesn't explicitly change any columns, allow the
             * query if we have permission on any column of the rel.  This is
             * to handle SELECT FOR UPDATE as well as possible corner cases in
             * INSERT and UPDATE.
             */
            if (bms_is_empty(rte->modifiedCols))
            {
                if (pg_attribute_aclcheck_all(relOid, userid, remainingPerms,
                                              ACLMASK_ANY) != ACLCHECK_OK)
                    aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
                                   get_rel_name(relOid));
            }

            tmpset = bms_copy(rte->modifiedCols);
            while ((col = bms_first_member(tmpset)) >= 0)
            {
                /* remove the column number offset */
                col += FirstLowInvalidHeapAttributeNumber;
                if (col == InvalidAttrNumber)
                {
                    /* whole-row reference can't happen here */
                    elog(ERROR, "whole-row update is not implemented");
                }
                else
                {
                    if (pg_attribute_aclcheck(relOid, col, userid, remainingPerms)
                        != ACLCHECK_OK)
                        aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
                                       get_rel_name(relOid));
                }
            }
            bms_free(tmpset);
        }
    }
}
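
/*
 * Note on the encoding assumed above: attribute numbers in selectedCols and
 * modifiedCols are stored offset by -FirstLowInvalidHeapAttributeNumber, so
 * that system attributes (negative attnums) and whole-row references
 * (attnum 0, i.e. InvalidAttrNumber) fit in a Bitmapset.  For instance,
 * with FirstLowInvalidHeapAttributeNumber = -8 (its value in this era),
 * user column 3 is stored as bit 11 and a whole-row Var as bit 8; adding
 * FirstLowInvalidHeapAttributeNumber back, as the loops above do, recovers
 * 3 and 0 respectively.
 */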

/*
 * Check that the query does not imply any writes to non-temp tables.
 */
static void
ExecCheckXactReadOnly(PlannedStmt *plannedstmt)
{
    ListCell   *l;

    /*
     * CREATE TABLE AS or SELECT INTO?
     *
     * XXX should we allow this if the destination is temp?
     */
    if (plannedstmt->intoClause != NULL)
        goto fail;

    /* Fail if write permissions are requested on any non-temp table */
    foreach(l, plannedstmt->rtable)
    {
        RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);

        if (rte->rtekind != RTE_RELATION)
            continue;

        if ((rte->requiredPerms & (~ACL_SELECT)) == 0)
            continue;

        if (isTempNamespace(get_rel_namespace(rte->relid)))
            continue;

        goto fail;
    }

    return;

fail:
    ereport(ERROR,
            (errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION),
             errmsg("transaction is read-only")));
}
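
/*
 * For example, inside BEGIN TRANSACTION READ ONLY, an UPDATE on a regular
 * table fails here because its RTE requests more than ACL_SELECT, while
 * writes to pre-existing temporary tables pass via the isTempNamespace()
 * escape above.  SELECT INTO fails unconditionally through the intoClause
 * test, even for a temp destination (hence the XXX comment).
 */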


/* ----------------------------------------------------------------
 *        InitPlan
 *
 *        Initializes the query plan: open files, allocate storage
 *        and start up the rule manager
 * ----------------------------------------------------------------
 */
static void
InitPlan(QueryDesc *queryDesc, int eflags)
{
    CmdType     operation = queryDesc->operation;
    PlannedStmt *plannedstmt = queryDesc->plannedstmt;
    Plan       *plan = plannedstmt->planTree;
    List       *rangeTable = plannedstmt->rtable;
    EState     *estate = queryDesc->estate;
    PlanState  *planstate;
    TupleDesc   tupType;
    ListCell   *l;
    int         i;

    /*
     * Do permissions checks
     */
    ExecCheckRTPerms(rangeTable);

    /*
     * initialize the node's execution state
     */
    estate->es_range_table = rangeTable;
    estate->es_plannedstmt = plannedstmt;

    /*
     * initialize result relation stuff, and open/lock the result rels.
     *
     * We must do this before initializing the plan tree, else we might
     * try to do a lock upgrade if a result rel is also a source rel.
     */
    if (plannedstmt->resultRelations)
    {
        List       *resultRelations = plannedstmt->resultRelations;
        int         numResultRelations = list_length(resultRelations);
        ResultRelInfo *resultRelInfos;
        ResultRelInfo *resultRelInfo;

        resultRelInfos = (ResultRelInfo *)
            palloc(numResultRelations * sizeof(ResultRelInfo));
        resultRelInfo = resultRelInfos;
        foreach(l, resultRelations)
        {
            Index       resultRelationIndex = lfirst_int(l);
            Oid         resultRelationOid;
            Relation    resultRelation;

            resultRelationOid = getrelid(resultRelationIndex, rangeTable);
            resultRelation = heap_open(resultRelationOid, RowExclusiveLock);
            InitResultRelInfo(resultRelInfo,
                              resultRelation,
                              resultRelationIndex,
                              operation,
                              estate->es_instrument);
            resultRelInfo++;
        }
        estate->es_result_relations = resultRelInfos;
        estate->es_num_result_relations = numResultRelations;
        /* es_result_relation_info is NULL except when within ModifyTable */
        estate->es_result_relation_info = NULL;
    }
    else
    {
        /*
         * if no result relation, then set state appropriately
         */
        estate->es_result_relations = NULL;
        estate->es_num_result_relations = 0;
        estate->es_result_relation_info = NULL;
    }

    /*
     * Similarly, we have to lock relations selected FOR UPDATE/FOR SHARE
     * before we initialize the plan tree, else we'd be risking lock
     * upgrades.  While we are at it, build the ExecRowMark list.
     */
    estate->es_rowMarks = NIL;
    foreach(l, plannedstmt->rowMarks)
    {
        PlanRowMark *rc = (PlanRowMark *) lfirst(l);
        Oid         relid;
        Relation    relation;
        ExecRowMark *erm;

        /* ignore "parent" rowmarks; they are irrelevant at runtime */
        if (rc->isParent)
            continue;

        switch (rc->markType)
        {
            case ROW_MARK_EXCLUSIVE:
            case ROW_MARK_SHARE:
                relid = getrelid(rc->rti, rangeTable);
                relation = heap_open(relid, RowShareLock);
                break;
            case ROW_MARK_REFERENCE:
                relid = getrelid(rc->rti, rangeTable);
                relation = heap_open(relid, AccessShareLock);
                break;
            case ROW_MARK_COPY:
                /* there's no real table here ... */
                relation = NULL;
                break;
            default:
                elog(ERROR, "unrecognized markType: %d", rc->markType);
                relation = NULL;    /* keep compiler quiet */
                break;
        }

        erm = (ExecRowMark *) palloc(sizeof(ExecRowMark));
        erm->relation = relation;
        erm->rti = rc->rti;
        erm->prti = rc->prti;
        erm->markType = rc->markType;
        erm->noWait = rc->noWait;
        erm->ctidAttNo = rc->ctidAttNo;
        erm->toidAttNo = rc->toidAttNo;
        erm->wholeAttNo = rc->wholeAttNo;
        ItemPointerSetInvalid(&(erm->curCtid));
        estate->es_rowMarks = lappend(estate->es_rowMarks, erm);
    }

    /*
     * Detect whether we're doing SELECT INTO.  If so, set the es_into_oids
     * flag appropriately so that the plan tree will be initialized with the
     * correct tuple descriptors.  (Other SELECT INTO stuff comes later.)
     */
    estate->es_select_into = false;
    if (operation == CMD_SELECT && plannedstmt->intoClause != NULL)
    {
        estate->es_select_into = true;
        estate->es_into_oids = interpretOidsOption(plannedstmt->intoClause->options);
    }

    /*
     * Initialize the executor's tuple table to empty.
     */
    estate->es_tupleTable = NIL;
    estate->es_trig_tuple_slot = NULL;
    estate->es_trig_oldtup_slot = NULL;

    /* mark EvalPlanQual not active */
    estate->es_epqTuple = NULL;
    estate->es_epqTupleSet = NULL;
    estate->es_epqScanDone = NULL;

    /*
     * Initialize private state information for each SubPlan.  We must do this
     * before running ExecInitNode on the main query tree, since
     * ExecInitSubPlan expects to be able to find these entries.
     */
    Assert(estate->es_subplanstates == NIL);
    i = 1;                      /* subplan indices count from 1 */
    foreach(l, plannedstmt->subplans)
    {
        Plan       *subplan = (Plan *) lfirst(l);
        PlanState  *subplanstate;
        int         sp_eflags;

        /*
         * A subplan will never need to do BACKWARD scan or MARK/RESTORE.  If
         * it is a parameterless subplan (not initplan), we suggest that it be
         * prepared to handle REWIND efficiently; otherwise there is no need.
         */
        sp_eflags = eflags & EXEC_FLAG_EXPLAIN_ONLY;
        if (bms_is_member(i, plannedstmt->rewindPlanIDs))
            sp_eflags |= EXEC_FLAG_REWIND;

        subplanstate = ExecInitNode(subplan, estate, sp_eflags);

        estate->es_subplanstates = lappend(estate->es_subplanstates,
                                           subplanstate);

        i++;
    }

    /*
     * Initialize the private state information for all the nodes in the query
     * tree.  This opens files, allocates storage and leaves us ready to start
     * processing tuples.
     */
    planstate = ExecInitNode(plan, estate, eflags);

    /*
     * Get the tuple descriptor describing the type of tuples to return. (this
     * is especially important if we are creating a relation with "SELECT
     * INTO")
     */
    tupType = ExecGetResultType(planstate);

    /*
     * Initialize the junk filter if needed.  SELECT queries need a
     * filter if there are any junk attrs in the top-level tlist.
     */
    if (operation == CMD_SELECT)
    {
        bool        junk_filter_needed = false;
        ListCell   *tlist;

        foreach(tlist, plan->targetlist)
        {
            TargetEntry *tle = (TargetEntry *) lfirst(tlist);

            if (tle->resjunk)
            {
                junk_filter_needed = true;
                break;
            }
        }

        if (junk_filter_needed)
        {
            JunkFilter *j;

            j = ExecInitJunkFilter(planstate->plan->targetlist,
                                   tupType->tdhasoid,
                                   ExecInitExtraTupleSlot(estate));
            estate->es_junkFilter = j;

            /* Want to return the cleaned tuple type */
            tupType = j->jf_cleanTupType;
        }
    }

    queryDesc->tupDesc = tupType;
    queryDesc->planstate = planstate;

    /*
     * If doing SELECT INTO, initialize the "into" relation.  We must wait
     * till now so we have the "clean" result tuple type to create the new
     * table from.
     *
     * If EXPLAIN, skip creating the "into" relation.
     */
    if (estate->es_select_into && !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
        OpenIntoRel(queryDesc);
}
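
/*
 * Note that when eflags includes EXEC_FLAG_EXPLAIN_ONLY (as plain EXPLAIN
 * without ANALYZE does), InitPlan still builds the complete planstate tree
 * so that EXPLAIN can walk it, but side effects such as creating the
 * SELECT INTO target relation are skipped, as seen just above.
 */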

/*
 * Initialize ResultRelInfo data for one result relation
 */
void
InitResultRelInfo(ResultRelInfo *resultRelInfo,
                  Relation resultRelationDesc,
                  Index resultRelationIndex,
                  CmdType operation,
                  int instrument_options)
{
    /*
     * Check valid relkind ... parser and/or planner should have noticed this
     * already, but let's make sure.
     */
    switch (resultRelationDesc->rd_rel->relkind)
    {
        case RELKIND_RELATION:
            /* OK */
            break;
        case RELKIND_SEQUENCE:
            ereport(ERROR,
                    (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                     errmsg("cannot change sequence \"%s\"",
                            RelationGetRelationName(resultRelationDesc))));
            break;
        case RELKIND_TOASTVALUE:
            ereport(ERROR,
                    (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                     errmsg("cannot change TOAST relation \"%s\"",
                            RelationGetRelationName(resultRelationDesc))));
            break;
        case RELKIND_VIEW:
            ereport(ERROR,
                    (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                     errmsg("cannot change view \"%s\"",
                            RelationGetRelationName(resultRelationDesc))));
            break;
        default:
            ereport(ERROR,
                    (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                     errmsg("cannot change relation \"%s\"",
                            RelationGetRelationName(resultRelationDesc))));
            break;
    }

    /* OK, fill in the node */
    MemSet(resultRelInfo, 0, sizeof(ResultRelInfo));
    resultRelInfo->type = T_ResultRelInfo;
    resultRelInfo->ri_RangeTableIndex = resultRelationIndex;
    resultRelInfo->ri_RelationDesc = resultRelationDesc;
    resultRelInfo->ri_NumIndices = 0;
    resultRelInfo->ri_IndexRelationDescs = NULL;
    resultRelInfo->ri_IndexRelationInfo = NULL;
    /* make a copy so as not to depend on relcache info not changing... */
    resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc);
    if (resultRelInfo->ri_TrigDesc)
    {
        int         n = resultRelInfo->ri_TrigDesc->numtriggers;

        resultRelInfo->ri_TrigFunctions = (FmgrInfo *)
            palloc0(n * sizeof(FmgrInfo));
        resultRelInfo->ri_TrigWhenExprs = (List **)
            palloc0(n * sizeof(List *));
        if (instrument_options)
            resultRelInfo->ri_TrigInstrument = InstrAlloc(n, instrument_options);
    }
    else
    {
        resultRelInfo->ri_TrigFunctions = NULL;
        resultRelInfo->ri_TrigWhenExprs = NULL;
        resultRelInfo->ri_TrigInstrument = NULL;
    }
    resultRelInfo->ri_ConstraintExprs = NULL;
    resultRelInfo->ri_junkFilter = NULL;
    resultRelInfo->ri_projectReturning = NULL;

    /*
     * If there are indices on the result relation, open them and save
     * descriptors in the result relation info, so that we can add new index
     * entries for the tuples we add/update.  We need not do this for a
     * DELETE, however, since deletion doesn't affect indexes.
     */
    if (resultRelationDesc->rd_rel->relhasindex &&
        operation != CMD_DELETE)
        ExecOpenIndices(resultRelInfo);
}

/*
 *        ExecGetTriggerResultRel
 *
 * Get a ResultRelInfo for a trigger target relation.  Most of the time,
 * triggers are fired on one of the result relations of the query, and so
 * we can just return a member of the es_result_relations array.  (Note: in
 * self-join situations there might be multiple members with the same OID;
 * if so it doesn't matter which one we pick.)  However, it is sometimes
 * necessary to fire triggers on other relations; this happens mainly when an
 * RI update trigger queues additional triggers on other relations, which will
 * be processed in the context of the outer query.  For efficiency's sake,
 * we want to have a ResultRelInfo for those triggers too; that can avoid
 * repeated re-opening of the relation.  (It also provides a way for EXPLAIN
 * ANALYZE to report the runtimes of such triggers.)  So we make additional
 * ResultRelInfo's as needed, and save them in es_trig_target_relations.
 */
ResultRelInfo *
ExecGetTriggerResultRel(EState *estate, Oid relid)
{
    ResultRelInfo *rInfo;
    int         nr;
    ListCell   *l;
    Relation    rel;
    MemoryContext oldcontext;

    /* First, search through the query result relations */
    rInfo = estate->es_result_relations;
    nr = estate->es_num_result_relations;
    while (nr > 0)
    {
        if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
            return rInfo;
        rInfo++;
        nr--;
    }
    /* Nope, but maybe we already made an extra ResultRelInfo for it */
    foreach(l, estate->es_trig_target_relations)
    {
        rInfo = (ResultRelInfo *) lfirst(l);
        if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
            return rInfo;
    }
    /* Nope, so we need a new one */

    /*
     * Open the target relation's relcache entry.  We assume that an
     * appropriate lock is still held by the backend from whenever the trigger
     * event got queued, so we need take no new lock here.
     */
    rel = heap_open(relid, NoLock);

    /*
     * Make the new entry in the right context.  Currently, we don't need any
     * index information in ResultRelInfos used only for triggers, so tell
     * InitResultRelInfo it's a DELETE.
     */
    oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
    rInfo = makeNode(ResultRelInfo);
    InitResultRelInfo(rInfo,
                      rel,
                      0,        /* dummy rangetable index */
                      CMD_DELETE,
                      estate->es_instrument);
    estate->es_trig_target_relations =
        lappend(estate->es_trig_target_relations, rInfo);
    MemoryContextSwitchTo(oldcontext);

    return rInfo;
}

/*
 *        ExecContextForcesOids
 *
 * This is pretty grotty: when doing INSERT, UPDATE, or SELECT INTO,
 * we need to ensure that result tuples have space for an OID iff they are
 * going to be stored into a relation that has OIDs.  In other contexts
 * we are free to choose whether to leave space for OIDs in result tuples
 * (we generally don't want to, but we do if a physical-tlist optimization
 * is possible).  This routine checks the plan context and returns TRUE if the
 * choice is forced, FALSE if the choice is not forced.  In the TRUE case,
 * *hasoids is set to the required value.
 *
 * One reason this is ugly is that all plan nodes in the plan tree will emit
 * tuples with space for an OID, though we really only need the topmost node
 * to do so.  However, node types like Sort don't project new tuples but just
 * return their inputs, and in those cases the requirement propagates down
 * to the input node.  Eventually we might make this code smart enough to
 * recognize how far down the requirement really goes, but for now we just
 * make all plan nodes do the same thing if the top level forces the choice.
 *
 * We assume that if we are generating tuples for INSERT or UPDATE,
 * estate->es_result_relation_info is already set up to describe the target
 * relation.  Note that in an UPDATE that spans an inheritance tree, some of
 * the target relations may have OIDs and some not.  We have to make the
 * decisions on a per-relation basis as we initialize each of the subplans of
 * the ModifyTable node, so ModifyTable has to set es_result_relation_info
 * while initializing each subplan.
 *
 * SELECT INTO is even uglier, because we don't have the INTO relation's
 * descriptor available when this code runs; we have to look aside at a
 * flag set by InitPlan().
 */
bool
ExecContextForcesOids(PlanState *planstate, bool *hasoids)
{
    ResultRelInfo *ri = planstate->state->es_result_relation_info;

    if (ri != NULL)
    {
        Relation    rel = ri->ri_RelationDesc;

        if (rel != NULL)
        {
            *hasoids = rel->rd_rel->relhasoids;
            return true;
        }
    }

    if (planstate->state->es_select_into)
    {
        *hasoids = planstate->state->es_into_oids;
        return true;
    }

    return false;
}

/* ----------------------------------------------------------------
 *        ExecEndPlan
 *
 *        Cleans up the query plan -- closes files and frees up storage
 *
 * NOTE: we are no longer very worried about freeing storage per se
 * in this code; FreeExecutorState should be guaranteed to release all
 * memory that needs to be released.  What we are worried about doing
 * is closing relations and dropping buffer pins.  Thus, for example,
 * tuple tables must be cleared or dropped to ensure pins are released.
 * ----------------------------------------------------------------
 */
static void
ExecEndPlan(PlanState *planstate, EState *estate)
{
    ResultRelInfo *resultRelInfo;
    int         i;
    ListCell   *l;

    /*
     * shut down the node-type-specific query processing
     */
    ExecEndNode(planstate);

    /*
     * for subplans too
     */
    foreach(l, estate->es_subplanstates)
    {
        PlanState  *subplanstate = (PlanState *) lfirst(l);

        ExecEndNode(subplanstate);
    }

    /*
     * destroy the executor's tuple table.  Actually we only care about
     * releasing buffer pins and tupdesc refcounts; there's no need to
     * pfree the TupleTableSlots, since the containing memory context
     * is about to go away anyway.
     */
    ExecResetTupleTable(estate->es_tupleTable, false);

    /*
     * close the result relation(s) if any, but hold locks until xact commit.
     */
    resultRelInfo = estate->es_result_relations;
    for (i = estate->es_num_result_relations; i > 0; i--)
    {
        /* Close indices and then the relation itself */
        ExecCloseIndices(resultRelInfo);
        heap_close(resultRelInfo->ri_RelationDesc, NoLock);
        resultRelInfo++;
    }

    /*
     * likewise close any trigger target relations
     */
    foreach(l, estate->es_trig_target_relations)
    {
        resultRelInfo = (ResultRelInfo *) lfirst(l);
        /* Close indices and then the relation itself */
        ExecCloseIndices(resultRelInfo);
        heap_close(resultRelInfo->ri_RelationDesc, NoLock);
    }

    /*
     * close any relations selected FOR UPDATE/FOR SHARE, again keeping locks
     */
    foreach(l, estate->es_rowMarks)
    {
        ExecRowMark *erm = (ExecRowMark *) lfirst(l);

        if (erm->relation)
            heap_close(erm->relation, NoLock);
    }
}

/* ----------------------------------------------------------------
 *        ExecutePlan
 *
 *        Processes the query plan until we have processed 'numberTuples'
 *        tuples, moving in the specified direction.
 *
 *        Runs to completion if numberTuples is 0
 *
 * Note: the ctid attribute is a 'junk' attribute that is removed before the
 * user can see it
 * ----------------------------------------------------------------
 */
static void
ExecutePlan(EState *estate,
            PlanState *planstate,
            CmdType operation,
            bool sendTuples,
            long numberTuples,
            ScanDirection direction,
            DestReceiver *dest)
{
    TupleTableSlot *slot;
    long        current_tuple_count;

    /*
     * initialize local variables
     */
    current_tuple_count = 0;

    /*
     * Set the direction.
     */
    estate->es_direction = direction;

    /*
     * Loop until we've processed the proper number of tuples from the plan.
     */
    for (;;)
    {
        /* Reset the per-output-tuple exprcontext */
        ResetPerTupleExprContext(estate);

        /*
         * Execute the plan and obtain a tuple
         */
        slot = ExecProcNode(planstate);

        /*
         * if the tuple is null, then we assume there is nothing more to
         * process so we just end the loop...
         */
        if (TupIsNull(slot))
            break;

        /*
         * If we have a junk filter, then project a new tuple with the junk
         * removed.
         *
         * Store this new "clean" tuple in the junkfilter's resultSlot.
         * (Formerly, we stored it back over the "dirty" tuple, which is WRONG
         * because that tuple slot has the wrong descriptor.)
         */
        if (estate->es_junkFilter != NULL)
            slot = ExecFilterJunk(estate->es_junkFilter, slot);

        /*
         * If we are supposed to send the tuple somewhere, do so.
         * (In practice, this is probably always the case at this point.)
         */
        if (sendTuples)
            (*dest->receiveSlot) (slot, dest);

        /*
         * Count tuples processed, if this is a SELECT.  (For other operation
         * types, the ModifyTable plan node must count the appropriate
         * events.)
         */
        if (operation == CMD_SELECT)
            (estate->es_processed)++;

        /*
         * check our tuple count.. if we've processed the proper number then
         * quit, else loop again and process more tuples.  Zero numberTuples
         * means no limit.
         */
        current_tuple_count++;
        if (numberTuples && numberTuples == current_tuple_count)
            break;
    }
}
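
/*
 * The numberTuples limit is what implements cursor fetch counts: for
 * example, "FETCH FORWARD 10 FROM c" ends up (via the portal code) as
 * ExecutorRun(queryDesc, ForwardScanDirection, 10L), so the loop above
 * stops after ten tuples and leaves the plan positioned for the next
 * fetch.  (Illustrative; the exact call path lives in pquery.c.)
 */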


/*
 * ExecRelCheck --- check that tuple meets constraints for result relation
 */
static const char *
ExecRelCheck(ResultRelInfo *resultRelInfo,
             TupleTableSlot *slot, EState *estate)
{
    Relation    rel = resultRelInfo->ri_RelationDesc;
    int         ncheck = rel->rd_att->constr->num_check;
    ConstrCheck *check = rel->rd_att->constr->check;
    ExprContext *econtext;
    MemoryContext oldContext;
    List       *qual;
    int         i;

    /*
     * If first time through for this result relation, build expression
     * nodetrees for rel's constraint expressions.  Keep them in the per-query
     * memory context so they'll survive throughout the query.
     */
    if (resultRelInfo->ri_ConstraintExprs == NULL)
    {
        oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
        resultRelInfo->ri_ConstraintExprs =
            (List **) palloc(ncheck * sizeof(List *));
        for (i = 0; i < ncheck; i++)
        {
            /* ExecQual wants implicit-AND form */
            qual = make_ands_implicit(stringToNode(check[i].ccbin));
            resultRelInfo->ri_ConstraintExprs[i] = (List *)
                ExecPrepareExpr((Expr *) qual, estate);
        }
        MemoryContextSwitchTo(oldContext);
    }

    /*
     * We will use the EState's per-tuple context for evaluating constraint
     * expressions (creating it if it's not already there).
     */
    econtext = GetPerTupleExprContext(estate);

    /* Arrange for econtext's scan tuple to be the tuple under test */
    econtext->ecxt_scantuple = slot;

    /* And evaluate the constraints */
    for (i = 0; i < ncheck; i++)
    {
        qual = resultRelInfo->ri_ConstraintExprs[i];

        /*
         * NOTE: SQL92 specifies that a NULL result from a constraint
         * expression is not to be treated as a failure.  Therefore, tell
         * ExecQual to return TRUE for NULL.
         */
        if (!ExecQual(qual, econtext, true))
            return check[i].ccname;
    }

    /* NULL result means no error */
    return NULL;
}

void
ExecConstraints(ResultRelInfo *resultRelInfo,
                TupleTableSlot *slot, EState *estate)
{
    Relation    rel = resultRelInfo->ri_RelationDesc;
    TupleConstr *constr = rel->rd_att->constr;

    Assert(constr);

    if (constr->has_not_null)
    {
        int         natts = rel->rd_att->natts;
        int         attrChk;

        for (attrChk = 1; attrChk <= natts; attrChk++)
        {
            if (rel->rd_att->attrs[attrChk - 1]->attnotnull &&
                slot_attisnull(slot, attrChk))
                ereport(ERROR,
                        (errcode(ERRCODE_NOT_NULL_VIOLATION),
                         errmsg("null value in column \"%s\" violates not-null constraint",
                        NameStr(rel->rd_att->attrs[attrChk - 1]->attname))));
        }
    }

    if (constr->num_check > 0)
    {
        const char *failed;

        if ((failed = ExecRelCheck(resultRelInfo, slot, estate)) != NULL)
            ereport(ERROR,
                    (errcode(ERRCODE_CHECK_VIOLATION),
                     errmsg("new row for relation \"%s\" violates check constraint \"%s\"",
                            RelationGetRelationName(rel), failed)));
    }
}
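
/*
 * As an illustration of the two error paths above: inserting a NULL into a
 * NOT NULL column raises SQLSTATE 23502 (not_null_violation), and a row
 * rejected by a CHECK constraint raises 23514 (check_violation), e.g.
 *
 *    CREATE TABLE t (x int NOT NULL CHECK (x > 0));
 *    INSERT INTO t VALUES (NULL);    fails in the has_not_null branch
 *    INSERT INTO t VALUES (-1);      fails in the num_check branch
 */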
1335
1336
1337 /*
1338  * EvalPlanQual logic --- recheck modified tuple(s) to see if we want to
1339  * process the updated version under READ COMMITTED rules.
1340  *
1341  * See backend/executor/README for some info about how this works.
1342  */
1343
1344
1345 /*
1346  * Check a modified tuple to see if we want to process its updated version
1347  * under READ COMMITTED rules.
1348  *
1349  *      estate - outer executor state data
1350  *      epqstate - state for EvalPlanQual rechecking
1351  *      relation - table containing tuple
1352  *      rti - rangetable index of table containing tuple
1353  *      *tid - t_ctid from the outdated tuple (ie, next updated version)
1354  *      priorXmax - t_xmax from the outdated tuple
1355  *
1356  * *tid is also an output parameter: it's modified to hold the TID of the
1357  * latest version of the tuple (note this may be changed even on failure)
1358  *
1359  * Returns a slot containing the new candidate update/delete tuple, or
1360  * NULL if we determine we shouldn't process the row.
1361  */
1362 TupleTableSlot *
1363 EvalPlanQual(EState *estate, EPQState *epqstate,
1364                          Relation relation, Index rti,
1365                          ItemPointer tid, TransactionId priorXmax)
1366 {
1367         TupleTableSlot *slot;
1368         HeapTuple       copyTuple;
1369
1370         Assert(rti > 0);
1371
1372         /*
1373          * Get and lock the updated version of the row; if fail, return NULL.
1374          */
1375         copyTuple = EvalPlanQualFetch(estate, relation, LockTupleExclusive,
1376                                                                   tid, priorXmax);
1377
1378         if (copyTuple == NULL)
1379                 return NULL;
1380
1381         /*
1382          * For UPDATE/DELETE we have to return the tid of the actual row we're
1383          * executing EPQ for.
1384          */
1385         *tid = copyTuple->t_self;
1386
1387         /*
1388          * Need to run a recheck subquery.  Initialize or reinitialize EPQ state.
1389          */
1390         EvalPlanQualBegin(epqstate, estate);
1391
1392         /*
1393          * Free old test tuple, if any, and store new tuple where relation's
1394          * scan node will see it
1395          */
1396         EvalPlanQualSetTuple(epqstate, rti, copyTuple);
1397
1398         /*
1399          * Fetch any non-locked source rows
1400          */
1401         EvalPlanQualFetchRowMarks(epqstate);
1402
1403         /*
1404          * Run the EPQ query.  We assume it will return at most one tuple.
1405          */
1406         slot = EvalPlanQualNext(epqstate);
1407
1408         /*
1409          * If we got a tuple, force the slot to materialize the tuple so that
1410          * it is not dependent on any local state in the EPQ query (in particular,
1411          * it's highly likely that the slot contains references to pass-by-ref
1412          * datums present in copyTuple).  As with the next step,
1413          * this is to guard against early re-use of the EPQ query.
1414          */
1415         if (!TupIsNull(slot))
1416                 (void) ExecMaterializeSlot(slot);
1417
1418         /*
1419          * Clear out the test tuple.  This is needed in case the EPQ query
1420          * is re-used to test a tuple for a different relation.  (Not clear
1421          * that can really happen, but let's be safe.)
1422          */
1423         EvalPlanQualSetTuple(epqstate, rti, NULL);
1424
1425         return slot;
1426 }
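
/*
 * A minimal caller sketch, under READ COMMITTED, for an ExecUpdate-style
 * caller whose heap_update just reported HeapTupleUpdated (local variable
 * names here are assumptions, not part of this file):
 *
 *		epqslot = EvalPlanQual(estate, epqstate,
 *							   resultRelationDesc,
 *							   resultRelInfo->ri_RangeTableIndex,
 *							   &update_ctid, update_xmax);
 *		if (TupIsNull(epqslot))
 *			return NULL;		   (row is gone or fails quals: skip it)
 *		*tupleid = update_ctid;	   (then retry the update on the new version)
 */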
1427
1428 /*
1429  * Fetch a copy of the newest version of an outdated tuple
1430  *
1431  *      estate - executor state data
1432  *      relation - table containing tuple
1433  *      lockmode - requested tuple lock mode
1434  *      *tid - t_ctid from the outdated tuple (ie, next updated version)
1435  *      priorXmax - t_xmax from the outdated tuple
1436  *
1437  * Returns a palloc'd copy of the newest tuple version, or NULL if we find
1438  * that there is no newest version (ie, the row was deleted, not updated).
1439  * If successful, we have locked the newest tuple version, so caller does not
1440  * need to worry about it changing anymore.
1441  *
1442  * Note: properly, lockmode should be declared as enum LockTupleMode,
1443  * but we use "int" to avoid having to include heapam.h in executor.h.
1444  */
1445 HeapTuple
1446 EvalPlanQualFetch(EState *estate, Relation relation, int lockmode,
1447                                   ItemPointer tid, TransactionId priorXmax)
1448 {
1449         HeapTuple       copyTuple = NULL;
1450         HeapTupleData tuple;
1451         SnapshotData SnapshotDirty;
1452
1453         /*
1454          * fetch target tuple
1455          *
1456          * Loop here to deal with updated or busy tuples
1457          */
1458         InitDirtySnapshot(SnapshotDirty);
1459         tuple.t_self = *tid;
1460         for (;;)
1461         {
1462                 Buffer          buffer;
1463
1464                 if (heap_fetch(relation, &SnapshotDirty, &tuple, &buffer, true, NULL))
1465                 {
1466                         HTSU_Result test;
1467                         ItemPointerData update_ctid;
1468                         TransactionId update_xmax;
1469
1470                         /*
1471                          * If xmin isn't what we're expecting, the slot must have been
1472                          * recycled and reused for an unrelated tuple.  This implies that
1473                          * the latest version of the row was deleted, so we need do
1474                          * nothing.  (Should be safe to examine xmin without getting
1475                          * buffer's content lock, since xmin never changes in an existing
1476                          * tuple.)
1477                          */
1478                         if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
1479                                                                          priorXmax))
1480                         {
1481                                 ReleaseBuffer(buffer);
1482                                 return NULL;
1483                         }
1484
1485                         /* otherwise xmin should not be dirty... */
1486                         if (TransactionIdIsValid(SnapshotDirty.xmin))
1487                                 elog(ERROR, "t_xmin is uncommitted in tuple to be updated");
1488
1489                         /*
1490                          * If tuple is being updated by other transaction then we have to
1491                          * wait for its commit/abort.
1492                          */
1493                         if (TransactionIdIsValid(SnapshotDirty.xmax))
1494                         {
1495                                 ReleaseBuffer(buffer);
1496                                 XactLockTableWait(SnapshotDirty.xmax);
1497                                 continue;               /* loop back to repeat heap_fetch */
1498                         }
1499
1500                         /*
1501                          * If tuple was inserted by our own transaction, we have to check
1502                          * cmin against es_output_cid: cmin >= current CID means our
1503                          * command cannot see the tuple, so we should ignore it.  Without
1504                          * this we are open to the "Halloween problem" of indefinitely
1505                          * re-updating the same tuple. (We need not check cmax because
1506                          * HeapTupleSatisfiesDirty will consider a tuple deleted by our
1507                          * transaction dead, regardless of cmax.)  We just checked that
1508                          * priorXmax == xmin, so we can test that variable instead of
1509                          * doing HeapTupleHeaderGetXmin again.
1510                          */
1511                         if (TransactionIdIsCurrentTransactionId(priorXmax) &&
1512                                 HeapTupleHeaderGetCmin(tuple.t_data) >= estate->es_output_cid)
1513                         {
1514                                 ReleaseBuffer(buffer);
1515                                 return NULL;
1516                         }
1517
1518                         /*
1519                          * This is a live tuple, so now try to lock it.
1520                          */
1521                         test = heap_lock_tuple(relation, &tuple, &buffer,
1522                                                                    &update_ctid, &update_xmax,
1523                                                                    estate->es_output_cid,
1524                                                                    lockmode, false);
1525                         /* We now have two pins on the buffer, get rid of one */
1526                         ReleaseBuffer(buffer);
1527
1528                         switch (test)
1529                         {
1530                                 case HeapTupleSelfUpdated:
1531                                         /* treat it as deleted; do not process */
1532                                         ReleaseBuffer(buffer);
1533                                         return NULL;
1534
1535                                 case HeapTupleMayBeUpdated:
1536                                         /* successfully locked */
1537                                         break;
1538
1539                                 case HeapTupleUpdated:
1540                                         ReleaseBuffer(buffer);
1541                                         if (IsXactIsoLevelSerializable)
1542                                                 ereport(ERROR,
1543                                                                 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1544                                                                  errmsg("could not serialize access due to concurrent update")));
1545                                         if (!ItemPointerEquals(&update_ctid, &tuple.t_self))
1546                                         {
1547                                                 /* it was updated, so look at the updated version */
1548                                                 tuple.t_self = update_ctid;
1549                                                 /* updated row should have xmin matching this xmax */
1550                                                 priorXmax = update_xmax;
1551                                                 continue;
1552                                         }
1553                                         /* tuple was deleted, so give up */
1554                                         return NULL;
1555
1556                                 default:
1557                                         ReleaseBuffer(buffer);
1558                                         elog(ERROR, "unrecognized heap_lock_tuple status: %u",
1559                                                  test);
1560                                         return NULL;    /* keep compiler quiet */
1561                         }
1562
1563                         /*
1564                          * We got tuple - now copy it for use by recheck query.
1565                          */
1566                         copyTuple = heap_copytuple(&tuple);
1567                         ReleaseBuffer(buffer);
1568                         break;
1569                 }
1570
1571                 /*
1572                  * If the referenced slot was actually empty, the latest version of
1573                  * the row must have been deleted, so we need do nothing.
1574                  */
1575                 if (tuple.t_data == NULL)
1576                 {
1577                         ReleaseBuffer(buffer);
1578                         return NULL;
1579                 }
1580
1581                 /*
1582                  * As above, if xmin isn't what we're expecting, do nothing.
1583                  */
1584                 if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
1585                                                                  priorXmax))
1586                 {
1587                         ReleaseBuffer(buffer);
1588                         return NULL;
1589                 }
1590
1591                 /*
1592                  * If we get here, the tuple was found but failed SnapshotDirty.
1593                  * Assuming the xmin is either a committed xact or our own xact (as it
1594                  * certainly should be if we're trying to modify the tuple), this must
1595                  * mean that the row was updated or deleted by either a committed xact
1596                  * or our own xact.  If it was deleted, we can ignore it; if it was
1597                  * updated then chain up to the next version and repeat the whole
1598                  * process.
1599                  *
1600                  * As above, it should be safe to examine xmax and t_ctid without the
1601                  * buffer content lock, because they can't be changing.
1602                  */
1603                 if (ItemPointerEquals(&tuple.t_self, &tuple.t_data->t_ctid))
1604                 {
1605                         /* deleted, so forget about it */
1606                         ReleaseBuffer(buffer);
1607                         return NULL;
1608                 }
1609
1610                 /* updated, so look at the updated row */
1611                 tuple.t_self = tuple.t_data->t_ctid;
1612                 /* updated row should have xmin matching this xmax */
1613                 priorXmax = HeapTupleHeaderGetXmax(tuple.t_data);
1614                 ReleaseBuffer(buffer);
1615                 /* loop back to fetch next in chain */
1616         }
1617
1618         /*
1619          * Return the copied tuple
1620          */
1621         return copyTuple;
1622 }
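
/*
 * Besides EvalPlanQual() above, a row-locking caller (ExecLockRows-style;
 * a sketch only, with lockmode derived from the rowmark flavor) might use:
 *
 *		copyTuple = EvalPlanQualFetch(estate, erm->relation,
 *									  (erm->markType == ROW_MARK_EXCLUSIVE) ?
 *									  LockTupleExclusive : LockTupleShared,
 *									  &update_ctid, update_xmax);
 *		if (copyTuple == NULL)
 *			goto lnext;			   (row was deleted: don't return it)
 */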
1623
1624 /*
1625  * EvalPlanQualInit -- initialize during creation of a plan state node
1626  * that might need to invoke EPQ processing.
1627  * Note: subplan can be NULL if it will be set later with EvalPlanQualSetPlan.
1628  */
1629 void
1630 EvalPlanQualInit(EPQState *epqstate, EState *estate,
1631                                  Plan *subplan, int epqParam)
1632 {
1633         /* Mark the EPQ state inactive */
1634         epqstate->estate = NULL;
1635         epqstate->planstate = NULL;
1636         epqstate->origslot = NULL;
1637         /* ... and remember data that EvalPlanQualBegin will need */
1638         epqstate->plan = subplan;
1639         epqstate->rowMarks = NIL;
1640         epqstate->epqParam = epqParam;
1641 }
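
/*
 * Typical node-startup call (a sketch): ExecInitModifyTable-style code
 * passes NULL for the subplan and supplies it later, per subplan:
 *
 *		EvalPlanQualInit(&mtstate->mt_epqstate, estate, NULL, node->epqParam);
 */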
1642
1643 /*
1644  * EvalPlanQualSetPlan -- set or change subplan of an EPQState.
1645  *
1646  * We need this so that ModifyTable can deal with multiple subplans.
1647  */
1648 void
1649 EvalPlanQualSetPlan(EPQState *epqstate, Plan *subplan)
1650 {
1651         /* If we have a live EPQ query, shut it down */
1652         EvalPlanQualEnd(epqstate);
1653         /* And set/change the plan pointer */
1654         epqstate->plan = subplan;
1655 }
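
/*
 * Sketch (variable names assumed): when a ModifyTable node switches to its
 * next subplan it would do
 *
 *		EvalPlanQualSetPlan(&node->mt_epqstate, subplan);
 */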
1656
1657 /*
1658  * EvalPlanQualAddRowMark -- add an ExecRowMark that EPQ needs to handle.
1659  *
1660  * Currently, only non-locking RowMarks are supported.
1661  */
1662 void
1663 EvalPlanQualAddRowMark(EPQState *epqstate, ExecRowMark *erm)
1664 {
1665         if (RowMarkRequiresRowShareLock(erm->markType))
1666                 elog(ERROR, "EvalPlanQual doesn't support locking rowmarks");
1667         epqstate->rowMarks = lappend(epqstate->rowMarks, erm);
1668 }
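
/*
 * Sketch of registration during node initialization, guarding against the
 * locking marks this function rejects (caller shape is an assumption):
 *
 *		if (!RowMarkRequiresRowShareLock(erm->markType))
 *			EvalPlanQualAddRowMark(&epqstate, erm);
 */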
1669
1670 /*
1671  * Install one test tuple into EPQ state, or clear test tuple if tuple == NULL
1672  *
1673  * NB: passed tuple must be palloc'd; it may get freed later
1674  */
1675 void
1676 EvalPlanQualSetTuple(EPQState *epqstate, Index rti, HeapTuple tuple)
1677 {
1678         EState     *estate = epqstate->estate;
1679
1680         Assert(rti > 0);
1681
1682         /*
1683          * free old test tuple, if any, and store new tuple where relation's
1684          * scan node will see it
1685          */
1686         if (estate->es_epqTuple[rti - 1] != NULL)
1687                 heap_freetuple(estate->es_epqTuple[rti - 1]);
1688         estate->es_epqTuple[rti - 1] = tuple;
1689         estate->es_epqTupleSet[rti - 1] = true;
1690 }
1691
1692 /*
1693  * Fetch back the current test tuple (if any) for the specified RTI
1694  */
1695 HeapTuple
1696 EvalPlanQualGetTuple(EPQState *epqstate, Index rti)
1697 {
1698         EState     *estate = epqstate->estate;
1699
1700         Assert(rti > 0);
1701
1702         return estate->es_epqTuple[rti - 1];
1703 }
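
/*
 * The two routines above form a symmetric pair; a hypothetical caller could
 * stash a test tuple and fetch it back like this (sketch only):
 *
 *		EvalPlanQualSetTuple(epqstate, rti, heap_copytuple(tuple));
 *		...
 *		testtuple = EvalPlanQualGetTuple(epqstate, rti);
 */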
1704
1705 /*
1706  * Fetch the current row values for any non-locked relations that need
1707  * to be scanned by an EvalPlanQual operation.  origslot must have been set
1708  * to contain the current result row (top-level row) that we need to recheck.
1709  */
1710 void
1711 EvalPlanQualFetchRowMarks(EPQState *epqstate)
1712 {
1713         ListCell   *l;
1714
1715         Assert(epqstate->origslot != NULL);
1716
1717         foreach(l, epqstate->rowMarks)
1718         {
1719                 ExecRowMark *erm = (ExecRowMark *) lfirst(l);
1720                 Datum           datum;
1721                 bool            isNull;
1722                 HeapTupleData tuple;
1723
1724                 /* clear any leftover test tuple for this rel */
1725                 EvalPlanQualSetTuple(epqstate, erm->rti, NULL);
1726
1727                 if (erm->relation)
1728                 {
1729                         Buffer          buffer;
1730
1731                         Assert(erm->markType == ROW_MARK_REFERENCE);
1732
1733                         /* if child rel, must check whether it produced this row */
1734                         if (erm->rti != erm->prti)
1735                         {
1736                                 Oid                     tableoid;
1737
1738                                 datum = ExecGetJunkAttribute(epqstate->origslot,
1739                                                                                          erm->toidAttNo,
1740                                                                                          &isNull);
1741                                 /* non-locked rels could be on the inside of outer joins */
1742                                 if (isNull)
1743                                         continue;
1744                                 tableoid = DatumGetObjectId(datum);
1745
1746                                 if (tableoid != RelationGetRelid(erm->relation))
1747                                 {
1748                                         /* this child is inactive right now */
1749                                         continue;
1750                                 }
1751                         }
1752
1753                         /* fetch the tuple's ctid */
1754                         datum = ExecGetJunkAttribute(epqstate->origslot,
1755                                                                                  erm->ctidAttNo,
1756                                                                                  &isNull);
1757                         /* non-locked rels could be on the inside of outer joins */
1758                         if (isNull)
1759                                 continue;
1760                         tuple.t_self = *((ItemPointer) DatumGetPointer(datum));
1761
1762                         /* okay, fetch the tuple */
1763                         if (!heap_fetch(erm->relation, SnapshotAny, &tuple, &buffer,
1764                                                         false, NULL))
1765                                 elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");
1766
1767                         /* successful, copy and store tuple */
1768                         EvalPlanQualSetTuple(epqstate, erm->rti,
1769                                                                  heap_copytuple(&tuple));
1770                         ReleaseBuffer(buffer);
1771                 }
1772                 else
1773                 {
1774                         HeapTupleHeader td;
1775
1776                         Assert(erm->markType == ROW_MARK_COPY);
1777
1778                         /* fetch the whole-row Var for the relation */
1779                         datum = ExecGetJunkAttribute(epqstate->origslot,
1780                                                                                  erm->wholeAttNo,
1781                                                                                  &isNull);
1782                         /* non-locked rels could be on the inside of outer joins */
1783                         if (isNull)
1784                                 continue;
1785                         td = DatumGetHeapTupleHeader(datum);
1786
1787                         /* build a temporary HeapTuple control structure */
1788                         tuple.t_len = HeapTupleHeaderGetDatumLength(td);
1789                         ItemPointerSetInvalid(&(tuple.t_self));
1790                         tuple.t_tableOid = InvalidOid;
1791                         tuple.t_data = td;
1792
1793                         /* copy and store tuple */
1794                         EvalPlanQualSetTuple(epqstate, erm->rti,
1795                                                                  heap_copytuple(&tuple));
1796                 }
1797         }
1798 }
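
/*
 * Note the required setup: origslot must already point at the row being
 * rechecked.  A sketch of the expected calling sequence, assuming the
 * EvalPlanQualSetSlot() macro from executor.h:
 *
 *		EvalPlanQualSetSlot(&epqstate, planSlot);
 *		EvalPlanQualFetchRowMarks(&epqstate);
 */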
1799
1800 /*
1801  * Fetch the next row (if any) from EvalPlanQual testing
1802  *
1803  * (In practice, there should never be more than one row...)
1804  */
1805 TupleTableSlot *
1806 EvalPlanQualNext(EPQState *epqstate)
1807 {
1808         MemoryContext oldcontext;
1809         TupleTableSlot *slot;
1810
1811         oldcontext = MemoryContextSwitchTo(epqstate->estate->es_query_cxt);
1812         slot = ExecProcNode(epqstate->planstate);
1813         MemoryContextSwitchTo(oldcontext);
1814
1815         return slot;
1816 }
1817
1818 /*
1819  * Initialize or reset an EvalPlanQual state tree
1820  */
1821 void
1822 EvalPlanQualBegin(EPQState *epqstate, EState *parentestate)
1823 {
1824         EState     *estate = epqstate->estate;
1825
1826         if (estate == NULL)
1827         {
1828                 /* First time through, so create a child EState */
1829                 EvalPlanQualStart(epqstate, parentestate, epqstate->plan);
1830         }
1831         else
1832         {
1833                 /*
1834                  * We already have a suitable child EPQ tree, so just reset it.
1835                  */
1836                 int                     rtsize = list_length(parentestate->es_range_table);
1837                 PlanState  *planstate = epqstate->planstate;
1838
1839                 MemSet(estate->es_epqScanDone, 0, rtsize * sizeof(bool));
1840
1841                 /* Recopy current values of parent parameters */
1842                 if (parentestate->es_plannedstmt->nParamExec > 0)
1843                 {
1844                         int             i = parentestate->es_plannedstmt->nParamExec;
1845
1846                         while (--i >= 0)
1847                         {
1848                                 /* copy value if any, but not execPlan link */
1849                                 estate->es_param_exec_vals[i].value =
1850                                         parentestate->es_param_exec_vals[i].value;
1851                                 estate->es_param_exec_vals[i].isnull =
1852                                         parentestate->es_param_exec_vals[i].isnull;
1853                         }
1854                 }
1855
1856                 /*
1857                  * Mark child plan tree as needing rescan at all scan nodes.  The
1858                  * first ExecProcNode will take care of actually doing the rescan.
1859                  */
1860                 planstate->chgParam = bms_add_member(planstate->chgParam,
1861                                                                                          epqstate->epqParam);
1862         }
1863 }
1864
1865 /*
1866  * Start execution of an EvalPlanQual plan tree.
1867  *
1868  * This is a cut-down version of ExecutorStart(): we copy some state from
1869  * the top-level estate rather than initializing it fresh.
1870  */
1871 static void
1872 EvalPlanQualStart(EPQState *epqstate, EState *parentestate, Plan *planTree)
1873 {
1874         EState     *estate;
1875         int                     rtsize;
1876         MemoryContext oldcontext;
1877         ListCell   *l;
1878
1879         rtsize = list_length(parentestate->es_range_table);
1880
1881         epqstate->estate = estate = CreateExecutorState();
1882
1883         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
1884
1885         /*
1886          * Child EPQ EStates share the parent's copy of unchanging state such as
1887          * the snapshot, rangetable, result-rel info, and external Param info.
1888          * They need their own copies of local state, including a tuple table,
1889          * es_param_exec_vals, etc.
1890          */
1891         estate->es_direction = ForwardScanDirection;
1892         estate->es_snapshot = parentestate->es_snapshot;
1893         estate->es_crosscheck_snapshot = parentestate->es_crosscheck_snapshot;
1894         estate->es_range_table = parentestate->es_range_table;
1895         estate->es_plannedstmt = parentestate->es_plannedstmt;
1896         estate->es_junkFilter = parentestate->es_junkFilter;
1897         estate->es_output_cid = parentestate->es_output_cid;
1898         estate->es_result_relations = parentestate->es_result_relations;
1899         estate->es_num_result_relations = parentestate->es_num_result_relations;
1900         estate->es_result_relation_info = parentestate->es_result_relation_info;
1901         /* es_trig_target_relations must NOT be copied */
1902         estate->es_rowMarks = parentestate->es_rowMarks;
1903         estate->es_instrument = parentestate->es_instrument;
1904         estate->es_select_into = parentestate->es_select_into;
1905         estate->es_into_oids = parentestate->es_into_oids;
1906
1907         /*
1908          * The external param list is simply shared from parent.  The internal
1909          * param workspace has to be local state, but we copy the initial values
1910          * from the parent, so as to have access to any param values that were
1911          * already set from other parts of the parent's plan tree.
1912          */
1913         estate->es_param_list_info = parentestate->es_param_list_info;
1914         if (parentestate->es_plannedstmt->nParamExec > 0)
1915         {
1916                 int             i = parentestate->es_plannedstmt->nParamExec;
1917
1918                 estate->es_param_exec_vals = (ParamExecData *)
1919                         palloc0(i * sizeof(ParamExecData));
1920                 while (--i >= 0)
1921                 {
1922                         /* copy value if any, but not execPlan link */
1923                         estate->es_param_exec_vals[i].value =
1924                                 parentestate->es_param_exec_vals[i].value;
1925                         estate->es_param_exec_vals[i].isnull =
1926                                 parentestate->es_param_exec_vals[i].isnull;
1927                 }
1928         }
1929
1930         /*
1931          * Each EState must have its own es_epqScanDone state, but if we have
1932          * nested EPQ checks they should share es_epqTuple arrays.  This allows
1933          * sub-rechecks to inherit the values being examined by an outer recheck.
1934          */
1935         estate->es_epqScanDone = (bool *) palloc0(rtsize * sizeof(bool));
1936         if (parentestate->es_epqTuple != NULL)
1937         {
1938                 estate->es_epqTuple = parentestate->es_epqTuple;
1939                 estate->es_epqTupleSet = parentestate->es_epqTupleSet;
1940         }
1941         else
1942         {
1943                 estate->es_epqTuple = (HeapTuple *)
1944                         palloc0(rtsize * sizeof(HeapTuple));
1945                 estate->es_epqTupleSet = (bool *)
1946                         palloc0(rtsize * sizeof(bool));
1947         }
1948
1949         /*
1950          * Each estate also has its own tuple table.
1951          */
1952         estate->es_tupleTable = NIL;
1953
1954         /*
1955          * Initialize private state information for each SubPlan.  We must do this
1956          * before running ExecInitNode on the main query tree, since
1957          * ExecInitSubPlan expects to be able to find these entries.
1958          * Some of the SubPlans might not be used in the part of the plan tree
1959          * we intend to run, but since it's not easy to tell which, we just
1960          * initialize them all.
1961          */
1962         Assert(estate->es_subplanstates == NIL);
1963         foreach(l, parentestate->es_plannedstmt->subplans)
1964         {
1965                 Plan       *subplan = (Plan *) lfirst(l);
1966                 PlanState  *subplanstate;
1967
1968                 subplanstate = ExecInitNode(subplan, estate, 0);
1969
1970                 estate->es_subplanstates = lappend(estate->es_subplanstates,
1971                                                                                    subplanstate);
1972         }
1973
1974         /*
1975          * Initialize the private state information for all the nodes in the
1976          * part of the plan tree we need to run.  This opens files, allocates
1977          * storage and leaves us ready to start processing tuples.
1978          */
1979         epqstate->planstate = ExecInitNode(planTree, estate, 0);
1980
1981         MemoryContextSwitchTo(oldcontext);
1982 }
1983
1984 /*
1985  * EvalPlanQualEnd -- shut down at termination of parent plan state node,
1986  * or if we are done with the current EPQ child.
1987  *
1988  * This is a cut-down version of ExecutorEnd(); basically we want to do most
1989  * of the normal cleanup, but *not* close result relations (which we are
1990  * just sharing from the outer query).  We do, however, have to close any
1991  * trigger target relations that got opened, since those are not shared.
1992  * (There probably shouldn't be any of the latter, but just in case...)
1993  */
1994 void
1995 EvalPlanQualEnd(EPQState *epqstate)
1996 {
1997         EState     *estate = epqstate->estate;
1998         MemoryContext oldcontext;
1999         ListCell   *l;
2000
2001         if (estate == NULL)
2002                 return;                                 /* idle, so nothing to do */
2003
2004         oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
2005
2006         ExecEndNode(epqstate->planstate);
2007
2008         foreach(l, estate->es_subplanstates)
2009         {
2010                 PlanState  *subplanstate = (PlanState *) lfirst(l);
2011
2012                 ExecEndNode(subplanstate);
2013         }
2014
2015         /* throw away the per-estate tuple table */
2016         ExecResetTupleTable(estate->es_tupleTable, false);
2017
2018         /* close any trigger target relations attached to this EState */
2019         foreach(l, estate->es_trig_target_relations)
2020         {
2021                 ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l);
2022
2023                 /* Close indices and then the relation itself */
2024                 ExecCloseIndices(resultRelInfo);
2025                 heap_close(resultRelInfo->ri_RelationDesc, NoLock);
2026         }
2027
2028         MemoryContextSwitchTo(oldcontext);
2029
2030         FreeExecutorState(estate);
2031
2032         /* Mark EPQState idle */
2033         epqstate->estate = NULL;
2034         epqstate->planstate = NULL;
2035         epqstate->origslot = NULL;
2036 }
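
/*
 * Tying the EPQ entry points together, a condensed lifecycle sketch
 * (hypothetical caller; names and control flow are assumptions):
 *
 *		EvalPlanQualInit(&epqstate, estate, subplan, epqParam);
 *		...		on each concurrent-update conflict:
 *		slot = EvalPlanQual(estate, &epqstate, rel, rti,
 *							&update_ctid, update_xmax);
 *		...		at node shutdown:
 *		EvalPlanQualEnd(&epqstate);
 */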
2037
2038
2039 /*
2040  * Support for SELECT INTO (a/k/a CREATE TABLE AS)
2041  *
2042  * We implement SELECT INTO by diverting SELECT's normal output with
2043  * a specialized DestReceiver type.
2044  */
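
/*
 * Both spellings funnel through the code below, e.g. (illustrative SQL):
 *
 *		SELECT x, y INTO newtab FROM src;
 *		CREATE TABLE newtab AS SELECT x, y FROM src;
 */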
2045
2046 typedef struct
2047 {
2048         DestReceiver pub;                       /* publicly-known function pointers */
2049         EState     *estate;                     /* EState we are working with */
2050         Relation        rel;                    /* Relation to write to */
2051         int                     hi_options;             /* heap_insert performance options */
2052         BulkInsertState bistate;        /* bulk insert state */
2053 } DR_intorel;
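
/*
 * For orientation, a sketch of how the executor drives any DestReceiver,
 * this one included (standard callback protocol; see dest.h):
 *
 *		(*dest->rStartup) (dest, operation, tupDesc);
 *		...	for each result row:
 *		(*dest->receiveSlot) (slot, dest);
 *		...
 *		(*dest->rShutdown) (dest);
 */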
2054
2055 /*
2056  * OpenIntoRel --- actually create the SELECT INTO target relation
2057  *
2058  * This also replaces QueryDesc->dest with the special DestReceiver for
2059  * SELECT INTO.  We assume that the correct result tuple type has already
2060  * been placed in queryDesc->tupDesc.
2061  */
2062 static void
2063 OpenIntoRel(QueryDesc *queryDesc)
2064 {
2065         IntoClause *into = queryDesc->plannedstmt->intoClause;
2066         EState     *estate = queryDesc->estate;
2067         Relation        intoRelationDesc;
2068         char       *intoName;
2069         Oid                     namespaceId;
2070         Oid                     tablespaceId;
2071         Datum           reloptions;
2072         AclResult       aclresult;
2073         Oid                     intoRelationId;
2074         TupleDesc       tupdesc;
2075         DR_intorel *myState;
2076         static char *validnsps[] = HEAP_RELOPT_NAMESPACES;
2077
2078         Assert(into);
2079
2080         /*
2081          * XXX This code needs to be kept in sync with DefineRelation().
2082          * Maybe we should try to use that function instead.
2083          */
2084
2085         /*
2086          * Check consistency of arguments
2087          */
2088         if (into->onCommit != ONCOMMIT_NOOP && !into->rel->istemp)
2089                 ereport(ERROR,
2090                                 (errcode(ERRCODE_INVALID_TABLE_DEFINITION),
2091                                  errmsg("ON COMMIT can only be used on temporary tables")));
2092
2093         /*
2094          * Security check: disallow creating temp tables from security-restricted
2095          * code.  This is needed because calling code might not expect untrusted
2096          * tables to appear in pg_temp at the front of its search path.
2097          */
2098         if (into->rel->istemp && InSecurityRestrictedOperation())
2099                 ereport(ERROR,
2100                                 (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
2101                                  errmsg("cannot create temporary table within security-restricted operation")));
2102
2103         /*
2104          * Find namespace to create in, check its permissions
2105          */
2106         intoName = into->rel->relname;
2107         namespaceId = RangeVarGetCreationNamespace(into->rel);
2108
2109         aclresult = pg_namespace_aclcheck(namespaceId, GetUserId(),
2110                                                                           ACL_CREATE);
2111         if (aclresult != ACLCHECK_OK)
2112                 aclcheck_error(aclresult, ACL_KIND_NAMESPACE,
2113                                            get_namespace_name(namespaceId));
2114
2115         /*
2116          * Select tablespace to use.  If not specified, use default tablespace
2117          * (which may in turn default to database's default).
2118          */
2119         if (into->tableSpaceName)
2120         {
2121                 tablespaceId = get_tablespace_oid(into->tableSpaceName);
2122                 if (!OidIsValid(tablespaceId))
2123                         ereport(ERROR,
2124                                         (errcode(ERRCODE_UNDEFINED_OBJECT),
2125                                          errmsg("tablespace \"%s\" does not exist",
2126                                                         into->tableSpaceName)));
2127         }
2128         else
2129         {
2130                 tablespaceId = GetDefaultTablespace(into->rel->istemp);
2131                 /* note InvalidOid is OK in this case */
2132         }
2133
2134         /* Check permissions except when using the database's default space */
2135         if (OidIsValid(tablespaceId) && tablespaceId != MyDatabaseTableSpace)
2136         {
2137                 AclResult       aclresult;
2138
2139                 aclresult = pg_tablespace_aclcheck(tablespaceId, GetUserId(),
2140                                                                                    ACL_CREATE);
2141
2142                 if (aclresult != ACLCHECK_OK)
2143                         aclcheck_error(aclresult, ACL_KIND_TABLESPACE,
2144                                                    get_tablespace_name(tablespaceId));
2145         }
2146
2147         /* Parse and validate any reloptions */
2148         reloptions = transformRelOptions((Datum) 0,
2149                                                                          into->options,
2150                                                                          NULL,
2151                                                                          validnsps,
2152                                                                          true,
2153                                                                          false);
2154         (void) heap_reloptions(RELKIND_RELATION, reloptions, true);
2155
2156         /* Copy the tupdesc because heap_create_with_catalog modifies it */
2157         tupdesc = CreateTupleDescCopy(queryDesc->tupDesc);
2158
2159         /* Now we can actually create the new relation */
2160         intoRelationId = heap_create_with_catalog(intoName,
2161                                                                                           namespaceId,
2162                                                                                           tablespaceId,
2163                                                                                           InvalidOid,
2164                                                                                           InvalidOid,
2165                                                                                           GetUserId(),
2166                                                                                           tupdesc,
2167                                                                                           NIL,
2168                                                                                           RELKIND_RELATION,
2169                                                                                           false,
2170                                                                                           true,
2171                                                                                           0,
2172                                                                                           into->onCommit,
2173                                                                                           reloptions,
2174                                                                                           true,
2175                                                                                           allowSystemTableMods);
2176
2177         FreeTupleDesc(tupdesc);
2178
2179         /*
2180          * Advance command counter so that the newly-created relation's catalog
2181          * tuples will be visible to heap_open.
2182          */
2183         CommandCounterIncrement();
2184
2185         /*
2186          * If necessary, create a TOAST table for the INTO relation. Note that
2187          * AlterTableCreateToastTable ends with CommandCounterIncrement(), so that
2188          * the TOAST table will be visible for insertion.
2189          */
2190         reloptions = transformRelOptions((Datum) 0,
2191                                                                          into->options,
2192                                                                          "toast",
2193                                                                          validnsps,
2194                                                                          true,
2195                                                                          false);
2196
2197         (void) heap_reloptions(RELKIND_TOASTVALUE, reloptions, true);
2198
2199         AlterTableCreateToastTable(intoRelationId, reloptions);
2200
2201         /*
2202          * And open the constructed table for writing.
2203          */
2204         intoRelationDesc = heap_open(intoRelationId, AccessExclusiveLock);
2205
2206         /*
2207          * Now replace the query's DestReceiver with one for SELECT INTO
2208          */
2209         queryDesc->dest = CreateDestReceiver(DestIntoRel);
2210         myState = (DR_intorel *) queryDesc->dest;
2211         Assert(myState->pub.mydest == DestIntoRel);
2212         myState->estate = estate;
2213         myState->rel = intoRelationDesc;
2214
2215         /*
2216          * We can skip WAL-logging the insertions, unless PITR is in use.  We can
2217          * skip the FSM in any case.
2218          */
2219         myState->hi_options = HEAP_INSERT_SKIP_FSM |
2220                 (XLogArchivingActive() ? 0 : HEAP_INSERT_SKIP_WAL);
2221         myState->bistate = GetBulkInsertState();
2222
2223         /* Not using WAL requires rd_targblock be initially invalid */
2224         Assert(intoRelationDesc->rd_targblock == InvalidBlockNumber);
2225 }
2226
2227 /*
2228  * CloseIntoRel --- clean up SELECT INTO at ExecutorEnd time
2229  */
2230 static void
2231 CloseIntoRel(QueryDesc *queryDesc)
2232 {
2233         DR_intorel *myState = (DR_intorel *) queryDesc->dest;
2234
2235         /* OpenIntoRel might never have gotten called */
2236         if (myState && myState->pub.mydest == DestIntoRel && myState->rel)
2237         {
2238                 FreeBulkInsertState(myState->bistate);
2239
2240                 /* If we skipped using WAL, must heap_sync before commit */
2241                 if (myState->hi_options & HEAP_INSERT_SKIP_WAL)
2242                         heap_sync(myState->rel);
2243
2244                 /* close rel, but keep lock until commit */
2245                 heap_close(myState->rel, NoLock);
2246
2247                 myState->rel = NULL;
2248         }
2249 }
2250
2251 /*
2252  * CreateIntoRelDestReceiver -- create a suitable DestReceiver object
2253  */
2254 DestReceiver *
2255 CreateIntoRelDestReceiver(void)
2256 {
2257         DR_intorel *self = (DR_intorel *) palloc0(sizeof(DR_intorel));
2258
2259         self->pub.receiveSlot = intorel_receive;
2260         self->pub.rStartup = intorel_startup;
2261         self->pub.rShutdown = intorel_shutdown;
2262         self->pub.rDestroy = intorel_destroy;
2263         self->pub.mydest = DestIntoRel;
2264
2265         /* private fields will be set by OpenIntoRel */
2266
2267         return (DestReceiver *) self;
2268 }
2269
2270 /*
2271  * intorel_startup --- executor startup
2272  */
2273 static void
2274 intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
2275 {
2276         /* no-op */
2277 }
2278
2279 /*
2280  * intorel_receive --- receive one tuple
2281  */
2282 static void
2283 intorel_receive(TupleTableSlot *slot, DestReceiver *self)
2284 {
2285         DR_intorel *myState = (DR_intorel *) self;
2286         HeapTuple       tuple;
2287
2288         /*
2289          * get the heap tuple out of the tuple table slot, making sure we have a
2290          * writable copy
2291          */
2292         tuple = ExecMaterializeSlot(slot);
2293
2294         /*
2295          * force assignment of new OID (see comments in ExecInsert)
2296          */
2297         if (myState->rel->rd_rel->relhasoids)
2298                 HeapTupleSetOid(tuple, InvalidOid);
2299
2300         heap_insert(myState->rel,
2301                                 tuple,
2302                                 myState->estate->es_output_cid,
2303                                 myState->hi_options,
2304                                 myState->bistate);
2305
2306         /* We know this is a newly created relation, so there are no indexes */
2307 }
2308
2309 /*
2310  * intorel_shutdown --- executor end
2311  */
2312 static void
2313 intorel_shutdown(DestReceiver *self)
2314 {
2315         /* no-op */
2316 }
2317
2318 /*
2319  * intorel_destroy --- release DestReceiver object
2320  */
2321 static void
2322 intorel_destroy(DestReceiver *self)
2323 {
2324         pfree(self);
2325 }