/*-------------------------------------------------------------------------
 *
 * heapam.c
 *	  heap access method code
 *
 * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  src/backend/access/heap/heapam.c
 *
 * INTERFACE ROUTINES
 *		relation_open	- open any relation by relation OID
 *		relation_openrv - open any relation specified by a RangeVar
 *		relation_close	- close any relation
 *		heap_open		- open a heap relation by relation OID
 *		heap_openrv		- open a heap relation specified by a RangeVar
 *		heap_close		- (now just a macro for relation_close)
 *		heap_beginscan	- begin relation scan
 *		heap_rescan		- restart a relation scan
 *		heap_endscan	- end relation scan
 *		heap_getnext	- retrieve next tuple in scan
 *		heap_fetch		- retrieve tuple with given tid
 *		heap_insert		- insert tuple into a relation
 *		heap_delete		- delete a tuple from a relation
 *		heap_update		- replace a tuple in a relation with another tuple
 *		heap_markpos	- mark scan position
 *		heap_restrpos	- restore position to marked location
 *		heap_sync		- sync heap, for when no WAL has been written
 *
 * NOTES
 *	  This file contains the heap_ routines which implement
 *	  the POSTGRES heap access method used for all POSTGRES
 *	  relations.
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"
#include "access/heapam.h"
#include "access/hio.h"
#include "access/multixact.h"
#include "access/relscan.h"
#include "access/sysattr.h"
#include "access/transam.h"
#include "access/tuptoaster.h"
#include "access/valid.h"
#include "access/visibilitymap.h"
#include "access/xact.h"
#include "access/xlogutils.h"
#include "catalog/catalog.h"
#include "catalog/namespace.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "storage/bufmgr.h"
#include "storage/freespace.h"
#include "storage/lmgr.h"
#include "storage/predicate.h"
#include "storage/procarray.h"
#include "storage/smgr.h"
#include "storage/standby.h"
#include "utils/datum.h"
#include "utils/inval.h"
#include "utils/lsyscache.h"
#include "utils/relcache.h"
#include "utils/snapmgr.h"
#include "utils/syscache.h"
#include "utils/tqual.h"


/* GUC variable */
bool		synchronize_seqscans = true;
static HeapScanDesc heap_beginscan_internal(Relation relation,
						Snapshot snapshot,
						int nkeys, ScanKey key,
						bool allow_strat, bool allow_sync,
						bool is_bitmapscan);
static XLogRecPtr log_heap_update(Relation reln, Buffer oldbuf,
				ItemPointerData from, Buffer newbuf, HeapTuple newtup,
				bool all_visible_cleared, bool new_all_visible_cleared);
static bool HeapSatisfiesHOTUpdate(Relation relation, Bitmapset *hot_attrs,
					   HeapTuple oldtup, HeapTuple newtup);
/* ----------------------------------------------------------------
 *						 heap support routines
 * ----------------------------------------------------------------
 */

/* ----------------
 *		initscan - scan code common to heap_beginscan and heap_rescan
 * ----------------
 */
static void
initscan(HeapScanDesc scan, ScanKey key, bool is_rescan)
{
	bool		allow_strat;
	bool		allow_sync;
	/*
	 * Determine the number of blocks we have to scan.
	 *
	 * It is sufficient to do this once at scan start, since any tuples added
	 * while the scan is in progress will be invisible to my snapshot anyway.
	 * (That is not true when using a non-MVCC snapshot.  However, we couldn't
	 * guarantee to return tuples added after scan start anyway, since they
	 * might go into pages we already scanned.  To guarantee consistent
	 * results for a non-MVCC snapshot, the caller must hold some higher-level
	 * lock that ensures the interesting tuple(s) won't change.)
	 */
	scan->rs_nblocks = RelationGetNumberOfBlocks(scan->rs_rd);

	/*
	 * If the table is large relative to NBuffers, use a bulk-read access
	 * strategy and enable synchronized scanning (see syncscan.c).  Although
	 * the thresholds for these features could be different, we make them the
	 * same so that there are only two behaviors to tune rather than four.
	 * (However, some callers need to be able to disable one or both of these
	 * behaviors, independently of the size of the table; also there is a GUC
	 * variable that can disable synchronized scanning.)
	 *
	 * During a rescan, don't make a new strategy object if we don't have to.
	 */
	if (!RelationUsesLocalBuffers(scan->rs_rd) &&
		scan->rs_nblocks > NBuffers / 4)
	{
		allow_strat = scan->rs_allow_strat;
		allow_sync = scan->rs_allow_sync;
	}
	else
		allow_strat = allow_sync = false;

	if (allow_strat)
	{
		if (scan->rs_strategy == NULL)
			scan->rs_strategy = GetAccessStrategy(BAS_BULKREAD);
	}
	else
	{
		if (scan->rs_strategy != NULL)
			FreeAccessStrategy(scan->rs_strategy);
		scan->rs_strategy = NULL;
	}

	if (is_rescan)
	{
		/*
		 * If rescan, keep the previous startblock setting so that rewinding a
		 * cursor doesn't generate surprising results.  Reset the syncscan
		 * setting, though.
		 */
		scan->rs_syncscan = (allow_sync && synchronize_seqscans);
	}
	else if (allow_sync && synchronize_seqscans)
	{
		scan->rs_syncscan = true;
		scan->rs_startblock = ss_get_location(scan->rs_rd, scan->rs_nblocks);
	}
	else
	{
		scan->rs_syncscan = false;
		scan->rs_startblock = 0;
	}

	scan->rs_inited = false;
	scan->rs_ctup.t_data = NULL;
	ItemPointerSetInvalid(&scan->rs_ctup.t_self);
	scan->rs_cbuf = InvalidBuffer;
	scan->rs_cblock = InvalidBlockNumber;

	/* we don't have a marked position... */
	ItemPointerSetInvalid(&(scan->rs_mctid));

	/* page-at-a-time fields are always invalid when not rs_inited */

	/*
	 * copy the scan key, if appropriate
	 */
	if (key != NULL)
		memcpy(scan->rs_key, key, scan->rs_nkeys * sizeof(ScanKeyData));

	/*
	 * Currently, we don't have a stats counter for bitmap heap scans (but the
	 * underlying bitmap index scans will be counted).
	 */
	if (!scan->rs_bitmapscan)
		pgstat_count_heap_scan(scan->rs_rd);
}
/* ----------------
 *		heapgetpage - subroutine for heapgettup()
 *
 * This routine reads and pins the specified page of the relation.
 * In page-at-a-time mode it performs additional work, namely determining
 * which tuples on the page are visible.
 * ----------------
 */
void
heapgetpage(HeapScanDesc scan, BlockNumber page)
{
	Buffer		buffer;
	Snapshot	snapshot;
	Page		dp;
	int			lines;
	int			ntup;
	OffsetNumber lineoff;
	ItemId		lpp;
	bool		all_visible;

	Assert(page < scan->rs_nblocks);

	/* release previous scan buffer, if any */
	if (BufferIsValid(scan->rs_cbuf))
	{
		ReleaseBuffer(scan->rs_cbuf);
		scan->rs_cbuf = InvalidBuffer;
	}

	/* read page using selected strategy */
	scan->rs_cbuf = ReadBufferExtended(scan->rs_rd, MAIN_FORKNUM, page,
									   RBM_NORMAL, scan->rs_strategy);
	scan->rs_cblock = page;

	if (!scan->rs_pageatatime)
		return;

	buffer = scan->rs_cbuf;
	snapshot = scan->rs_snapshot;

	/*
	 * Prune and repair fragmentation for the whole page, if possible.
	 */
	Assert(TransactionIdIsValid(RecentGlobalXmin));
	heap_page_prune_opt(scan->rs_rd, buffer, RecentGlobalXmin);

	/*
	 * We must hold share lock on the buffer content while examining tuple
	 * visibility.  Afterwards, however, the tuples we have found to be
	 * visible are guaranteed good as long as we hold the buffer pin.
	 */
	LockBuffer(buffer, BUFFER_LOCK_SHARE);

	dp = (Page) BufferGetPage(buffer);
	lines = PageGetMaxOffsetNumber(dp);
	ntup = 0;

	/*
	 * If the all-visible flag indicates that all tuples on the page are
	 * visible to everyone, we can skip the per-tuple visibility tests. But
	 * not in hot standby mode. A tuple that's already visible to all
	 * transactions in the master might still be invisible to a read-only
	 * transaction in the standby.
	 */
	all_visible = PageIsAllVisible(dp) && !snapshot->takenDuringRecovery;

	for (lineoff = FirstOffsetNumber, lpp = PageGetItemId(dp, lineoff);
		 lineoff <= lines;
		 lineoff++, lpp++)
	{
		if (ItemIdIsNormal(lpp))
		{
			HeapTupleData loctup;
			bool		valid;

			loctup.t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
			loctup.t_len = ItemIdGetLength(lpp);
			ItemPointerSet(&(loctup.t_self), page, lineoff);

			if (all_visible)
				valid = true;
			else
				valid = HeapTupleSatisfiesVisibility(&loctup, snapshot, buffer);

			CheckForSerializableConflictOut(valid, scan->rs_rd, &loctup,
											buffer, snapshot);

			if (valid)
				scan->rs_vistuples[ntup++] = lineoff;
		}
	}

	LockBuffer(buffer, BUFFER_LOCK_UNLOCK);

	Assert(ntup <= MaxHeapTuplesPerPage);
	scan->rs_ntuples = ntup;
}
/* ----------------
 *		heapgettup - fetch next heap tuple
 *
 *		Initialize the scan if not already done; then advance to the next
 *		tuple as indicated by "dir"; return the next tuple in scan->rs_ctup,
 *		or set scan->rs_ctup.t_data = NULL if no more tuples.
 *
 * dir == NoMovementScanDirection means "re-fetch the tuple indicated
 * by scan->rs_ctup".
 *
 * Note: the reason nkeys/key are passed separately, even though they are
 * kept in the scan descriptor, is that the caller may not want us to check
 * the scankeys.
 *
 * Note: when we fall off the end of the scan in either direction, we
 * reset rs_inited.  This means that a further request with the same
 * scan direction will restart the scan, which is a bit odd, but a
 * request with the opposite scan direction will start a fresh scan
 * in the proper direction.  The latter is required behavior for cursors,
 * while the former case is generally undefined behavior in Postgres
 * so we don't care too much.
 * ----------------
 */
static void
heapgettup(HeapScanDesc scan,
		   ScanDirection dir,
		   int nkeys,
		   ScanKey key)
{
	HeapTuple	tuple = &(scan->rs_ctup);
	Snapshot	snapshot = scan->rs_snapshot;
	bool		backward = ScanDirectionIsBackward(dir);
	BlockNumber page;
	bool		finished;
	Page		dp;
	int			lines;
	OffsetNumber lineoff;
	int			linesleft;
	ItemId		lpp;

	/*
	 * calculate next starting lineoff, given scan direction
	 */
	if (ScanDirectionIsForward(dir))
	{
		if (!scan->rs_inited)
		{
			/*
			 * return null immediately if relation is empty
			 */
			if (scan->rs_nblocks == 0)
			{
				Assert(!BufferIsValid(scan->rs_cbuf));
				tuple->t_data = NULL;
				return;
			}
			page = scan->rs_startblock; /* first page */
			heapgetpage(scan, page);
			lineoff = FirstOffsetNumber;		/* first offnum */
			scan->rs_inited = true;
		}
		else
		{
			/* continue from previously returned page/tuple */
			page = scan->rs_cblock;		/* current page */
			lineoff =			/* next offnum */
				OffsetNumberNext(ItemPointerGetOffsetNumber(&(tuple->t_self)));
		}

		LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);

		dp = (Page) BufferGetPage(scan->rs_cbuf);
		lines = PageGetMaxOffsetNumber(dp);
		/* page and lineoff now reference the physically next tid */

		linesleft = lines - lineoff + 1;
	}
	else if (backward)
	{
		if (!scan->rs_inited)
		{
			/*
			 * return null immediately if relation is empty
			 */
			if (scan->rs_nblocks == 0)
			{
				Assert(!BufferIsValid(scan->rs_cbuf));
				tuple->t_data = NULL;
				return;
			}

			/*
			 * Disable reporting to syncscan logic in a backwards scan; it's
			 * not very likely anyone else is doing the same thing at the same
			 * time, and much more likely that we'll just bollix things for
			 * forward scanners.
			 */
			scan->rs_syncscan = false;
			/* start from last page of the scan */
			if (scan->rs_startblock > 0)
				page = scan->rs_startblock - 1;
			else
				page = scan->rs_nblocks - 1;
			heapgetpage(scan, page);
		}
		else
		{
			/* continue from previously returned page/tuple */
			page = scan->rs_cblock;		/* current page */
		}

		LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);

		dp = (Page) BufferGetPage(scan->rs_cbuf);
		lines = PageGetMaxOffsetNumber(dp);

		if (!scan->rs_inited)
		{
			lineoff = lines;	/* final offnum */
			scan->rs_inited = true;
		}
		else
		{
			lineoff =			/* previous offnum */
				OffsetNumberPrev(ItemPointerGetOffsetNumber(&(tuple->t_self)));
		}
		/* page and lineoff now reference the physically previous tid */

		linesleft = lineoff;
	}
	else
	{
		/*
		 * ``no movement'' scan direction: refetch prior tuple
		 */
		if (!scan->rs_inited)
		{
			Assert(!BufferIsValid(scan->rs_cbuf));
			tuple->t_data = NULL;
			return;
		}

		page = ItemPointerGetBlockNumber(&(tuple->t_self));
		if (page != scan->rs_cblock)
			heapgetpage(scan, page);

		/* Since the tuple was previously fetched, needn't lock page here */
		dp = (Page) BufferGetPage(scan->rs_cbuf);
		lineoff = ItemPointerGetOffsetNumber(&(tuple->t_self));
		lpp = PageGetItemId(dp, lineoff);
		Assert(ItemIdIsNormal(lpp));

		tuple->t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
		tuple->t_len = ItemIdGetLength(lpp);

		return;
	}

	/*
	 * advance the scan until we find a qualifying tuple or run out of stuff
	 * to scan
	 */
	lpp = PageGetItemId(dp, lineoff);
	for (;;)
	{
		while (linesleft > 0)
		{
			if (ItemIdIsNormal(lpp))
			{
				bool		valid;

				tuple->t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
				tuple->t_len = ItemIdGetLength(lpp);
				ItemPointerSet(&(tuple->t_self), page, lineoff);

				/*
				 * if current tuple qualifies, return it.
				 */
				valid = HeapTupleSatisfiesVisibility(tuple,
													 snapshot,
													 scan->rs_cbuf);

				CheckForSerializableConflictOut(valid, scan->rs_rd, tuple,
												scan->rs_cbuf, snapshot);

				if (valid && key != NULL)
					HeapKeyTest(tuple, RelationGetDescr(scan->rs_rd),
								nkeys, key, valid);

				if (valid)
				{
					if (!scan->rs_relpredicatelocked)
						PredicateLockTuple(scan->rs_rd, tuple, snapshot);
					LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
					return;
				}
			}

			/*
			 * otherwise move to the next item on the page
			 */
			--linesleft;
			if (backward)
			{
				--lpp;			/* move back in this page's ItemId array */
				--lineoff;
			}
			else
			{
				++lpp;			/* move forward in this page's ItemId array */
				++lineoff;
			}
		}

		/*
		 * if we get here, it means we've exhausted the items on this page and
		 * it's time to move to the next.
		 */
		LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);

		/*
		 * advance to next/prior page and detect end of scan
		 */
		if (backward)
		{
			finished = (page == scan->rs_startblock);
			if (page == 0)
				page = scan->rs_nblocks;
			page--;
		}
		else
		{
			page++;
			if (page >= scan->rs_nblocks)
				page = 0;
			finished = (page == scan->rs_startblock);

			/*
			 * Report our new scan position for synchronization purposes. We
			 * don't do that when moving backwards, however. That would just
			 * mess up any other forward-moving scanners.
			 *
			 * Note: we do this before checking for end of scan so that the
			 * final state of the position hint is back at the start of the
			 * rel.  That's not strictly necessary, but otherwise when you run
			 * the same query multiple times the starting position would shift
			 * a little bit backwards on every invocation, which is confusing.
			 * We don't guarantee any specific ordering in general, though.
			 */
			if (scan->rs_syncscan)
				ss_report_location(scan->rs_rd, page);
		}

		/*
		 * return NULL if we've exhausted all the pages
		 */
		if (finished)
		{
			if (BufferIsValid(scan->rs_cbuf))
				ReleaseBuffer(scan->rs_cbuf);
			scan->rs_cbuf = InvalidBuffer;
			scan->rs_cblock = InvalidBlockNumber;
			tuple->t_data = NULL;
			scan->rs_inited = false;
			return;
		}

		heapgetpage(scan, page);

		LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);

		dp = (Page) BufferGetPage(scan->rs_cbuf);
		lines = PageGetMaxOffsetNumber((Page) dp);
		linesleft = lines;
		if (backward)
		{
			lineoff = lines;
			lpp = PageGetItemId(dp, lines);
		}
		else
		{
			lineoff = FirstOffsetNumber;
			lpp = PageGetItemId(dp, FirstOffsetNumber);
		}
	}
}
/* ----------------
 *		heapgettup_pagemode - fetch next heap tuple in page-at-a-time mode
 *
 *		Same API as heapgettup, but used in page-at-a-time mode
 *
 * The internal logic is much the same as heapgettup's too, but there are some
 * differences: we do not take the buffer content lock (that only needs to
 * happen inside heapgetpage), and we iterate through just the tuples listed
 * in rs_vistuples[] rather than all tuples on the page.  Notice that
 * lineindex is 0-based, where the corresponding loop variable lineoff in
 * heapgettup is 1-based.
 * ----------------
 */
static void
heapgettup_pagemode(HeapScanDesc scan,
					ScanDirection dir,
					int nkeys,
					ScanKey key)
{
	HeapTuple	tuple = &(scan->rs_ctup);
	bool		backward = ScanDirectionIsBackward(dir);
	BlockNumber page;
	bool		finished;
	Page		dp;
	int			lines;
	int			lineindex;
	OffsetNumber lineoff;
	int			linesleft;
	ItemId		lpp;

	/*
	 * calculate next starting lineindex, given scan direction
	 */
	if (ScanDirectionIsForward(dir))
	{
		if (!scan->rs_inited)
		{
			/*
			 * return null immediately if relation is empty
			 */
			if (scan->rs_nblocks == 0)
			{
				Assert(!BufferIsValid(scan->rs_cbuf));
				tuple->t_data = NULL;
				return;
			}
			page = scan->rs_startblock; /* first page */
			heapgetpage(scan, page);
			lineindex = 0;
			scan->rs_inited = true;
		}
		else
		{
			/* continue from previously returned page/tuple */
			page = scan->rs_cblock;		/* current page */
			lineindex = scan->rs_cindex + 1;
		}

		dp = (Page) BufferGetPage(scan->rs_cbuf);
		lines = scan->rs_ntuples;
		/* page and lineindex now reference the next visible tid */

		linesleft = lines - lineindex;
	}
	else if (backward)
	{
		if (!scan->rs_inited)
		{
			/*
			 * return null immediately if relation is empty
			 */
			if (scan->rs_nblocks == 0)
			{
				Assert(!BufferIsValid(scan->rs_cbuf));
				tuple->t_data = NULL;
				return;
			}

			/*
			 * Disable reporting to syncscan logic in a backwards scan; it's
			 * not very likely anyone else is doing the same thing at the same
			 * time, and much more likely that we'll just bollix things for
			 * forward scanners.
			 */
			scan->rs_syncscan = false;
			/* start from last page of the scan */
			if (scan->rs_startblock > 0)
				page = scan->rs_startblock - 1;
			else
				page = scan->rs_nblocks - 1;
			heapgetpage(scan, page);
		}
		else
		{
			/* continue from previously returned page/tuple */
			page = scan->rs_cblock;		/* current page */
		}

		dp = (Page) BufferGetPage(scan->rs_cbuf);
		lines = scan->rs_ntuples;

		if (!scan->rs_inited)
		{
			lineindex = lines - 1;
			scan->rs_inited = true;
		}
		else
		{
			lineindex = scan->rs_cindex - 1;
		}
		/* page and lineindex now reference the previous visible tid */

		linesleft = lineindex + 1;
	}
	else
	{
		/*
		 * ``no movement'' scan direction: refetch prior tuple
		 */
		if (!scan->rs_inited)
		{
			Assert(!BufferIsValid(scan->rs_cbuf));
			tuple->t_data = NULL;
			return;
		}

		page = ItemPointerGetBlockNumber(&(tuple->t_self));
		if (page != scan->rs_cblock)
			heapgetpage(scan, page);

		/* Since the tuple was previously fetched, needn't lock page here */
		dp = (Page) BufferGetPage(scan->rs_cbuf);
		lineoff = ItemPointerGetOffsetNumber(&(tuple->t_self));
		lpp = PageGetItemId(dp, lineoff);
		Assert(ItemIdIsNormal(lpp));

		tuple->t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
		tuple->t_len = ItemIdGetLength(lpp);

		/* check that rs_cindex is in sync */
		Assert(scan->rs_cindex < scan->rs_ntuples);
		Assert(lineoff == scan->rs_vistuples[scan->rs_cindex]);

		return;
	}

	/*
	 * advance the scan until we find a qualifying tuple or run out of stuff
	 * to scan
	 */
	for (;;)
	{
		while (linesleft > 0)
		{
			lineoff = scan->rs_vistuples[lineindex];
			lpp = PageGetItemId(dp, lineoff);
			Assert(ItemIdIsNormal(lpp));

			tuple->t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
			tuple->t_len = ItemIdGetLength(lpp);
			ItemPointerSet(&(tuple->t_self), page, lineoff);

			/*
			 * if current tuple qualifies, return it.
			 */
			if (key != NULL)
			{
				bool		valid;

				HeapKeyTest(tuple, RelationGetDescr(scan->rs_rd),
							nkeys, key, valid);
				if (valid)
				{
					if (!scan->rs_relpredicatelocked)
						PredicateLockTuple(scan->rs_rd, tuple, scan->rs_snapshot);
					scan->rs_cindex = lineindex;
					return;
				}
			}
			else
			{
				if (!scan->rs_relpredicatelocked)
					PredicateLockTuple(scan->rs_rd, tuple, scan->rs_snapshot);
				scan->rs_cindex = lineindex;
				return;
			}

			/*
			 * otherwise move to the next item on the page
			 */
			--linesleft;
			if (backward)
				--lineindex;
			else
				++lineindex;
		}

		/*
		 * if we get here, it means we've exhausted the items on this page and
		 * it's time to move to the next.
		 */
		if (backward)
		{
			finished = (page == scan->rs_startblock);
			if (page == 0)
				page = scan->rs_nblocks;
			page--;
		}
		else
		{
			page++;
			if (page >= scan->rs_nblocks)
				page = 0;
			finished = (page == scan->rs_startblock);

			/*
			 * Report our new scan position for synchronization purposes. We
			 * don't do that when moving backwards, however. That would just
			 * mess up any other forward-moving scanners.
			 *
			 * Note: we do this before checking for end of scan so that the
			 * final state of the position hint is back at the start of the
			 * rel.  That's not strictly necessary, but otherwise when you run
			 * the same query multiple times the starting position would shift
			 * a little bit backwards on every invocation, which is confusing.
			 * We don't guarantee any specific ordering in general, though.
			 */
			if (scan->rs_syncscan)
				ss_report_location(scan->rs_rd, page);
		}

		/*
		 * return NULL if we've exhausted all the pages
		 */
		if (finished)
		{
			if (BufferIsValid(scan->rs_cbuf))
				ReleaseBuffer(scan->rs_cbuf);
			scan->rs_cbuf = InvalidBuffer;
			scan->rs_cblock = InvalidBlockNumber;
			tuple->t_data = NULL;
			scan->rs_inited = false;
			return;
		}

		heapgetpage(scan, page);

		dp = (Page) BufferGetPage(scan->rs_cbuf);
		lines = scan->rs_ntuples;
		linesleft = lines;
		if (backward)
			lineindex = lines - 1;
		else
			lineindex = 0;
	}
}
#if defined(DISABLE_COMPLEX_MACRO)
/*
 * This is formatted so oddly so that the correspondence to the macro
 * definition in access/heapam.h is maintained.
 */
Datum
fastgetattr(HeapTuple tup, int attnum, TupleDesc tupleDesc,
			bool *isnull)
{
	return (
			(attnum) > 0 ?
			(
			 (*(isnull) = false),
			 HeapTupleNoNulls(tup) ?
			 (
			  (tupleDesc)->attrs[(attnum) - 1]->attcacheoff >= 0 ?
			  (
			   fetchatt((tupleDesc)->attrs[(attnum) - 1],
						(char *) (tup)->t_data + (tup)->t_data->t_hoff +
						(tupleDesc)->attrs[(attnum) - 1]->attcacheoff)
			   )
			  :
			  nocachegetattr((tup), (attnum), (tupleDesc))
			  )
			 :
			 (
			  att_isnull((attnum) - 1, (tup)->t_data->t_bits) ?
			  (
			   (*(isnull) = true),
			   (Datum) NULL
			   )
			  :
			  (
			   nocachegetattr((tup), (attnum), (tupleDesc))
			   )
			  )
			 )
			:
			(
			 (Datum) NULL
			 )
		);
}
#endif   /* defined(DISABLE_COMPLEX_MACRO) */
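
/*
 * Editor's note: an illustrative sketch (not part of the original file) of
 * how a caller typically uses fastgetattr, or its macro form, to pull one
 * column out of a tuple.  The attribute number and descriptor source below
 * are assumptions for exposition:
 *
 *		Datum		d;
 *		bool		isnull;
 *
 *		d = fastgetattr(tuple, 1, RelationGetDescr(rel), &isnull);
 *		if (!isnull)
 *			... interpret d with the DatumGetXXX macro for the column type ...
 *
 * Attribute numbers are 1-based; a negative attnum would denote a system
 * column and must go through heap_getattr instead.
 */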
/* ----------------------------------------------------------------
 *					 heap access method interface
 * ----------------------------------------------------------------
 */

/* ----------------
 *		relation_open - open any relation by relation OID
 *
 *		If lockmode is not "NoLock", the specified kind of lock is
 *		obtained on the relation.  (Generally, NoLock should only be
 *		used if the caller knows it has some appropriate lock on the
 *		relation already.)
 *
 *		An error is raised if the relation does not exist.
 *
 *		NB: a "relation" is anything with a pg_class entry.  The caller is
 *		expected to check whether the relkind is something it can handle.
 * ----------------
 */
Relation
relation_open(Oid relationId, LOCKMODE lockmode)
{
	Relation	r;

	Assert(lockmode >= NoLock && lockmode < MAX_LOCKMODES);

	/* Get the lock before trying to open the relcache entry */
	if (lockmode != NoLock)
		LockRelationOid(relationId, lockmode);

	/* The relcache does all the real work... */
	r = RelationIdGetRelation(relationId);

	if (!RelationIsValid(r))
		elog(ERROR, "could not open relation with OID %u", relationId);

	/* Make note that we've accessed a temporary relation */
	if (RelationUsesLocalBuffers(r))
		MyXactAccessedTempRel = true;

	pgstat_initstats(r);

	return r;
}
/* ----------------
 *		try_relation_open - open any relation by relation OID
 *
 *		Same as relation_open, except return NULL instead of failing
 *		if the relation does not exist.
 * ----------------
 */
Relation
try_relation_open(Oid relationId, LOCKMODE lockmode)
{
	Relation	r;

	Assert(lockmode >= NoLock && lockmode < MAX_LOCKMODES);

	/* Get the lock first */
	if (lockmode != NoLock)
		LockRelationOid(relationId, lockmode);

	/*
	 * Now that we have the lock, probe to see if the relation really exists
	 * or not.
	 */
	if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(relationId)))
	{
		/* Release useless lock */
		if (lockmode != NoLock)
			UnlockRelationOid(relationId, lockmode);

		return NULL;
	}

	/* Should be safe to do a relcache load */
	r = RelationIdGetRelation(relationId);

	if (!RelationIsValid(r))
		elog(ERROR, "could not open relation with OID %u", relationId);

	/* Make note that we've accessed a temporary relation */
	if (RelationUsesLocalBuffers(r))
		MyXactAccessedTempRel = true;

	pgstat_initstats(r);

	return r;
}
/* ----------------
 *		relation_openrv - open any relation specified by a RangeVar
 *
 *		Same as relation_open, but the relation is specified by a RangeVar.
 * ----------------
 */
Relation
relation_openrv(const RangeVar *relation, LOCKMODE lockmode)
{
	Oid			relOid;

	/*
	 * Check for shared-cache-inval messages before trying to open the
	 * relation.  This is needed to cover the case where the name identifies a
	 * rel that has been dropped and recreated since the start of our
	 * transaction: if we don't flush the old syscache entry, then we'll latch
	 * onto that entry and suffer an error when we do RelationIdGetRelation.
	 * Note that relation_open does not need to do this, since a relation's
	 * OID never changes.
	 *
	 * We skip this if asked for NoLock, on the assumption that the caller has
	 * already ensured some appropriate lock is held.
	 */
	if (lockmode != NoLock)
		AcceptInvalidationMessages();

	/* Look up the appropriate relation using namespace search */
	relOid = RangeVarGetRelid(relation, false);

	/* Let relation_open do the rest */
	return relation_open(relOid, lockmode);
}
/* ----------------
 *		relation_openrv_extended - open any relation specified by a RangeVar
 *
 *		Same as relation_openrv, but with an additional missing_ok argument
 *		allowing a NULL return rather than an error if the relation is not
 *		found.  (Note that some other causes, such as permissions problems,
 *		will still result in an ereport.)
 * ----------------
 */
Relation
relation_openrv_extended(const RangeVar *relation, LOCKMODE lockmode,
						 bool missing_ok)
{
	Oid			relOid;

	/*
	 * Check for shared-cache-inval messages before trying to open the
	 * relation.  This is needed to cover the case where the name identifies a
	 * rel that has been dropped and recreated since the start of our
	 * transaction: if we don't flush the old syscache entry, then we'll latch
	 * onto that entry and suffer an error when we do RelationIdGetRelation.
	 * Note that relation_open does not need to do this, since a relation's
	 * OID never changes.
	 *
	 * We skip this if asked for NoLock, on the assumption that the caller has
	 * already ensured some appropriate lock is held.
	 */
	if (lockmode != NoLock)
		AcceptInvalidationMessages();

	/* Look up the appropriate relation using namespace search */
	relOid = RangeVarGetRelid(relation, missing_ok);

	/* Return NULL on not-found */
	if (!OidIsValid(relOid))
		return NULL;

	/* Let relation_open do the rest */
	return relation_open(relOid, lockmode);
}
/* ----------------
 *		relation_close - close any relation
 *
 *		If lockmode is not "NoLock", we then release the specified lock.
 *
 *		Note that it is often sensible to hold a lock beyond relation_close;
 *		in that case, the lock is released automatically at xact end.
 * ----------------
 */
void
relation_close(Relation relation, LOCKMODE lockmode)
{
	LockRelId	relid = relation->rd_lockInfo.lockRelId;

	Assert(lockmode >= NoLock && lockmode < MAX_LOCKMODES);

	/* The relcache does the real work... */
	RelationClose(relation);

	if (lockmode != NoLock)
		UnlockRelationId(&relid, lockmode);
}
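
/*
 * Editor's note: a minimal usage sketch (not from the original file) of the
 * open/close pairing above.  The relation OID is a placeholder:
 *
 *		Relation	rel;
 *
 *		rel = relation_open(my_relid, AccessShareLock);
 *		... examine rel->rd_rel, read data, etc ...
 *		relation_close(rel, AccessShareLock);
 *
 * Passing NoLock to relation_close keeps the lock; it is then released
 * automatically at transaction end, as noted above.
 */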
/* ----------------
 *		heap_open - open a heap relation by relation OID
 *
 *		This is essentially relation_open plus check that the relation
 *		is not an index nor a composite type.  (The caller should also
 *		check that it's not a view or foreign table before assuming it has
 *		storage.)
 * ----------------
 */
Relation
heap_open(Oid relationId, LOCKMODE lockmode)
{
	Relation	r;

	r = relation_open(relationId, lockmode);

	if (r->rd_rel->relkind == RELKIND_INDEX)
		ereport(ERROR,
				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
				 errmsg("\"%s\" is an index",
						RelationGetRelationName(r))));
	else if (r->rd_rel->relkind == RELKIND_COMPOSITE_TYPE)
		ereport(ERROR,
				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
				 errmsg("\"%s\" is a composite type",
						RelationGetRelationName(r))));

	return r;
}
/* ----------------
 *		heap_openrv - open a heap relation specified
 *		by a RangeVar node
 *
 *		As above, but relation is specified by a RangeVar.
 * ----------------
 */
Relation
heap_openrv(const RangeVar *relation, LOCKMODE lockmode)
{
	Relation	r;

	r = relation_openrv(relation, lockmode);

	if (r->rd_rel->relkind == RELKIND_INDEX)
		ereport(ERROR,
				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
				 errmsg("\"%s\" is an index",
						RelationGetRelationName(r))));
	else if (r->rd_rel->relkind == RELKIND_COMPOSITE_TYPE)
		ereport(ERROR,
				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
				 errmsg("\"%s\" is a composite type",
						RelationGetRelationName(r))));

	return r;
}
/* ----------------
 *		heap_openrv_extended - open a heap relation specified
 *		by a RangeVar node
 *
 *		As above, but optionally return NULL instead of failing for
 *		relation-not-found.
 * ----------------
 */
Relation
heap_openrv_extended(const RangeVar *relation, LOCKMODE lockmode,
					 bool missing_ok)
{
	Relation	r;

	r = relation_openrv_extended(relation, lockmode, missing_ok);

	if (r)
	{
		if (r->rd_rel->relkind == RELKIND_INDEX)
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("\"%s\" is an index",
							RelationGetRelationName(r))));
		else if (r->rd_rel->relkind == RELKIND_COMPOSITE_TYPE)
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("\"%s\" is a composite type",
							RelationGetRelationName(r))));
	}

	return r;
}
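
/*
 * Editor's note: an illustrative sketch (not part of the original file) of
 * opening a table by name via a RangeVar; makeRangeVar's arguments here are
 * assumptions for exposition:
 *
 *		RangeVar   *rv = makeRangeVar(NULL, "my_table", -1);
 *		Relation	rel = heap_openrv(rv, AccessShareLock);
 *
 *		... work with rel ...
 *		heap_close(rel, AccessShareLock);
 *
 * heap_openrv_extended behaves the same, but returns NULL when the table is
 * missing and missing_ok is true, so its callers must test the result.
 */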
/* ----------------
 *		heap_beginscan	- begin relation scan
 *
 * heap_beginscan_strat offers an extended API that lets the caller control
 * whether a nondefault buffer access strategy can be used, and whether
 * syncscan can be chosen (possibly resulting in the scan not starting from
 * block zero).  Both of these default to TRUE with plain heap_beginscan.
 *
 * heap_beginscan_bm is an alternative entry point for setting up a
 * HeapScanDesc for a bitmap heap scan.  Although that scan technology is
 * really quite unlike a standard seqscan, there is just enough commonality
 * to make it worth using the same data structure.
 * ----------------
 */
HeapScanDesc
heap_beginscan(Relation relation, Snapshot snapshot,
			   int nkeys, ScanKey key)
{
	return heap_beginscan_internal(relation, snapshot, nkeys, key,
								   true, true, false);
}

HeapScanDesc
heap_beginscan_strat(Relation relation, Snapshot snapshot,
					 int nkeys, ScanKey key,
					 bool allow_strat, bool allow_sync)
{
	return heap_beginscan_internal(relation, snapshot, nkeys, key,
								   allow_strat, allow_sync, false);
}

HeapScanDesc
heap_beginscan_bm(Relation relation, Snapshot snapshot,
				  int nkeys, ScanKey key)
{
	return heap_beginscan_internal(relation, snapshot, nkeys, key,
								   false, false, true);
}
static HeapScanDesc
heap_beginscan_internal(Relation relation, Snapshot snapshot,
						int nkeys, ScanKey key,
						bool allow_strat, bool allow_sync,
						bool is_bitmapscan)
{
	HeapScanDesc scan;

	/*
	 * increment relation ref count while scanning relation
	 *
	 * This is just to make really sure the relcache entry won't go away while
	 * the scan has a pointer to it.  Caller should be holding the rel open
	 * anyway, so this is redundant in all normal scenarios...
	 */
	RelationIncrementReferenceCount(relation);

	/*
	 * allocate and initialize scan descriptor
	 */
	scan = (HeapScanDesc) palloc(sizeof(HeapScanDescData));

	scan->rs_rd = relation;
	scan->rs_snapshot = snapshot;
	scan->rs_nkeys = nkeys;
	scan->rs_bitmapscan = is_bitmapscan;
	scan->rs_strategy = NULL;	/* set in initscan */
	scan->rs_allow_strat = allow_strat;
	scan->rs_allow_sync = allow_sync;
	scan->rs_relpredicatelocked = false;

	/*
	 * we can use page-at-a-time mode if it's an MVCC-safe snapshot
	 */
	scan->rs_pageatatime = IsMVCCSnapshot(snapshot);

	/* we only need to set this up once */
	scan->rs_ctup.t_tableOid = RelationGetRelid(relation);

	/*
	 * we do this here instead of in initscan() because heap_rescan also calls
	 * initscan() and we don't want to allocate memory again
	 */
	if (nkeys > 0)
		scan->rs_key = (ScanKey) palloc(sizeof(ScanKeyData) * nkeys);
	else
		scan->rs_key = NULL;

	initscan(scan, key, false);

	return scan;
}
/* ----------------
 *		heap_rescan		- restart a relation scan
 * ----------------
 */
void
heap_rescan(HeapScanDesc scan,
			ScanKey key)
{
	/*
	 * unpin scan buffers
	 */
	if (BufferIsValid(scan->rs_cbuf))
		ReleaseBuffer(scan->rs_cbuf);

	/*
	 * reinitialize scan descriptor
	 */
	initscan(scan, key, true);
}
/* ----------------
 *		heap_endscan	- end relation scan
 *
 *		See how to integrate with index scans.
 *		Check handling of reldesc caching.
 * ----------------
 */
void
heap_endscan(HeapScanDesc scan)
{
	/* Note: no locking manipulations needed */

	/*
	 * unpin scan buffers
	 */
	if (BufferIsValid(scan->rs_cbuf))
		ReleaseBuffer(scan->rs_cbuf);

	/*
	 * decrement relation reference count and free scan descriptor storage
	 */
	RelationDecrementReferenceCount(scan->rs_rd);

	if (scan->rs_key)
		pfree(scan->rs_key);

	if (scan->rs_strategy != NULL)
		FreeAccessStrategy(scan->rs_strategy);

	pfree(scan);
}
/* ----------------
 *		heap_getnext	- retrieve next tuple in scan
 *
 *		Fix to work with index relations.
 *		We don't return the buffer anymore, but you can get it from the
 *		returned HeapTuple.
 * ----------------
 */
#ifdef HEAPDEBUGALL
#define HEAPDEBUG_1 \
	elog(DEBUG2, "heap_getnext([%s,nkeys=%d],dir=%d) called", \
		 RelationGetRelationName(scan->rs_rd), scan->rs_nkeys, (int) direction)
#define HEAPDEBUG_2 \
	elog(DEBUG2, "heap_getnext returning EOS")
#define HEAPDEBUG_3 \
	elog(DEBUG2, "heap_getnext returning tuple")
#else
#define HEAPDEBUG_1
#define HEAPDEBUG_2
#define HEAPDEBUG_3
#endif   /* !defined(HEAPDEBUGALL) */


HeapTuple
heap_getnext(HeapScanDesc scan, ScanDirection direction)
{
	/* Note: no locking manipulations needed */

	HEAPDEBUG_1;				/* heap_getnext( info ) */

	if (scan->rs_pageatatime)
		heapgettup_pagemode(scan, direction,
							scan->rs_nkeys, scan->rs_key);
	else
		heapgettup(scan, direction, scan->rs_nkeys, scan->rs_key);

	if (scan->rs_ctup.t_data == NULL)
	{
		HEAPDEBUG_2;			/* heap_getnext returning EOS */
		return NULL;
	}

	/*
	 * if we get here it means we have a new current scan tuple, so point to
	 * the proper return buffer and return the tuple.
	 */
	HEAPDEBUG_3;				/* heap_getnext returning tuple */

	pgstat_count_heap_getnext(scan->rs_rd);

	return &(scan->rs_ctup);
}
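
/*
 * Editor's note: a sketch of the canonical sequential-scan loop built from
 * the routines above (illustrative only; the relation OID, snapshot choice,
 * and process() callback are assumptions, not code from this file):
 *
 *		Relation	rel = heap_open(my_relid, AccessShareLock);
 *		HeapScanDesc scan = heap_beginscan(rel, GetActiveSnapshot(), 0, NULL);
 *		HeapTuple	tuple;
 *
 *		while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
 *		{
 *			/-* tuple points into the scan's current buffer; copy it with
 *			 * heap_copytuple if it must outlive the next heap_getnext call *-/
 *			process(tuple);
 *		}
 *		heap_endscan(scan);
 *		heap_close(rel, AccessShareLock);
 *
 * (The /-* *-/ markers stand in for ordinary comment delimiters, which
 * cannot nest inside this block.)
 */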
/* ----------------
 *		heap_fetch		- retrieve tuple with given tid
 *
 * On entry, tuple->t_self is the TID to fetch.  We pin the buffer holding
 * the tuple, fill in the remaining fields of *tuple, and check the tuple
 * against the specified snapshot.
 *
 * If successful (tuple found and passes snapshot time qual), then *userbuf
 * is set to the buffer holding the tuple and TRUE is returned.  The caller
 * must unpin the buffer when done with the tuple.
 *
 * If the tuple is not found (ie, item number references a deleted slot),
 * then tuple->t_data is set to NULL and FALSE is returned.
 *
 * If the tuple is found but fails the time qual check, then FALSE is returned
 * but tuple->t_data is left pointing to the tuple.
 *
 * keep_buf determines what is done with the buffer in the FALSE-result cases.
 * When the caller specifies keep_buf = true, we retain the pin on the buffer
 * and return it in *userbuf (so the caller must eventually unpin it); when
 * keep_buf = false, the pin is released and *userbuf is set to InvalidBuffer.
 *
 * stats_relation is the relation to charge the heap_fetch operation against
 * for statistical purposes.  (This could be the heap rel itself, an
 * associated index, or NULL to not count the fetch at all.)
 *
 * heap_fetch does not follow HOT chains: only the exact TID requested will
 * be fetched.
 *
 * It is somewhat inconsistent that we ereport() on invalid block number but
 * return false on invalid item number.  There are a couple of reasons though.
 * One is that the caller can relatively easily check the block number for
 * validity, but cannot check the item number without reading the page
 * himself.  Another is that when we are following a t_ctid link, we can be
 * reasonably confident that the page number is valid (since VACUUM shouldn't
 * truncate off the destination page without having killed the referencing
 * tuple first), but the item number might well not be good.
 * ----------------
 */
bool
heap_fetch(Relation relation,
		   Snapshot snapshot,
		   HeapTuple tuple,
		   Buffer *userbuf,
		   bool keep_buf,
		   Relation stats_relation)
{
	ItemPointer tid = &(tuple->t_self);
	ItemId		lp;
	Buffer		buffer;
	Page		page;
	OffsetNumber offnum;
	bool		valid;

	/*
	 * Fetch and pin the appropriate page of the relation.
	 */
	buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));

	/*
	 * Need share lock on buffer to examine tuple commit status.
	 */
	LockBuffer(buffer, BUFFER_LOCK_SHARE);
	page = BufferGetPage(buffer);

	/*
	 * We'd better check for out-of-range offnum in case of VACUUM since the
	 * TID was obtained.
	 */
	offnum = ItemPointerGetOffsetNumber(tid);
	if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
	{
		LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
		if (keep_buf)
			*userbuf = buffer;
		else
		{
			ReleaseBuffer(buffer);
			*userbuf = InvalidBuffer;
		}
		tuple->t_data = NULL;
		return false;
	}

	/*
	 * get the item line pointer corresponding to the requested tid
	 */
	lp = PageGetItemId(page, offnum);

	/*
	 * Must check for deleted tuple.
	 */
	if (!ItemIdIsNormal(lp))
	{
		LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
		if (keep_buf)
			*userbuf = buffer;
		else
		{
			ReleaseBuffer(buffer);
			*userbuf = InvalidBuffer;
		}
		tuple->t_data = NULL;
		return false;
	}

	/*
	 * fill in *tuple fields
	 */
	tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
	tuple->t_len = ItemIdGetLength(lp);
	tuple->t_tableOid = RelationGetRelid(relation);

	/*
	 * check time qualification of tuple, then release lock
	 */
	valid = HeapTupleSatisfiesVisibility(tuple, snapshot, buffer);

	if (valid)
		PredicateLockTuple(relation, tuple, snapshot);

	CheckForSerializableConflictOut(valid, relation, tuple, buffer, snapshot);

	LockBuffer(buffer, BUFFER_LOCK_UNLOCK);

	if (valid)
	{
		/*
		 * All checks passed, so return the tuple as valid. Caller is now
		 * responsible for releasing the buffer.
		 */
		*userbuf = buffer;

		/* Count the successful fetch against appropriate rel, if any */
		if (stats_relation != NULL)
			pgstat_count_heap_fetch(stats_relation);

		return true;
	}

	/* Tuple failed time qual, but maybe caller wants to see it anyway. */
	if (keep_buf)
		*userbuf = buffer;
	else
	{
		ReleaseBuffer(buffer);
		*userbuf = InvalidBuffer;
	}

	return false;
}
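
/*
 * Editor's note: an illustrative heap_fetch call (not from the original
 * file); rel, snapshot, and tid are assumed to come from the caller:
 *
 *		HeapTupleData tuple;
 *		Buffer		buffer;
 *
 *		tuple.t_self = tid;		(TID to fetch)
 *		if (heap_fetch(rel, snapshot, &tuple, &buffer, false, NULL))
 *		{
 *			... use tuple.t_data ...
 *			ReleaseBuffer(buffer);	(caller must unpin on success)
 *		}
 *
 * With keep_buf = false, failure leaves *userbuf set to InvalidBuffer, so
 * no cleanup is needed on that path.
 */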
/*
 *	heap_hot_search_buffer	- search HOT chain for tuple satisfying snapshot
 *
 * On entry, *tid is the TID of a tuple (either a simple tuple, or the root
 * of a HOT chain), and buffer is the buffer holding this tuple.  We search
 * for the first chain member satisfying the given snapshot.  If one is
 * found, we update *tid to reference that tuple's offset number, and
 * return TRUE.  If no match, return FALSE without modifying *tid.
 *
 * heapTuple is a caller-supplied buffer.  When a match is found, we return
 * the tuple here, in addition to updating *tid.  If no match is found, the
 * contents of this buffer on return are undefined.
 *
 * If all_dead is not NULL, we check non-visible tuples to see if they are
 * globally dead; *all_dead is set TRUE if all members of the HOT chain
 * are vacuumable, FALSE if not.
 *
 * Unlike heap_fetch, the caller must already have pin and (at least) share
 * lock on the buffer; it is still pinned/locked at exit.  Also unlike
 * heap_fetch, we do not report any pgstats count; caller may do so if wanted.
 */
bool
heap_hot_search_buffer(ItemPointer tid, Relation relation, Buffer buffer,
					   Snapshot snapshot, HeapTuple heapTuple,
					   bool *all_dead, bool first_call)
{
	Page		dp = (Page) BufferGetPage(buffer);
	TransactionId prev_xmax = InvalidTransactionId;
	OffsetNumber offnum;
	bool		at_chain_start;
	bool		valid;
	bool		skip;

	/* If this is not the first call, previous call returned a (live!) tuple */
	if (all_dead)
		*all_dead = first_call;

	Assert(TransactionIdIsValid(RecentGlobalXmin));

	Assert(ItemPointerGetBlockNumber(tid) == BufferGetBlockNumber(buffer));
	offnum = ItemPointerGetOffsetNumber(tid);
	at_chain_start = first_call;
	skip = !first_call;

	/* Scan through possible multiple members of HOT-chain */
	for (;;)
	{
		ItemId		lp;

		/* check for bogus TID */
		if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(dp))
			break;

		lp = PageGetItemId(dp, offnum);

		/* check for unused, dead, or redirected items */
		if (!ItemIdIsNormal(lp))
		{
			/* We should only see a redirect at start of chain */
			if (ItemIdIsRedirected(lp) && at_chain_start)
			{
				/* Follow the redirect */
				offnum = ItemIdGetRedirect(lp);
				at_chain_start = false;
				continue;
			}
			/* else must be end of chain */
			break;
		}

		heapTuple->t_data = (HeapTupleHeader) PageGetItem(dp, lp);
		heapTuple->t_len = ItemIdGetLength(lp);
		heapTuple->t_tableOid = relation->rd_id;
		heapTuple->t_self = *tid;

		/*
		 * Shouldn't see a HEAP_ONLY tuple at chain start.
		 */
		if (at_chain_start && HeapTupleIsHeapOnly(heapTuple))
			break;

		/*
		 * The xmin should match the previous xmax value, else chain is
		 * broken.
		 */
		if (TransactionIdIsValid(prev_xmax) &&
			!TransactionIdEquals(prev_xmax,
								 HeapTupleHeaderGetXmin(heapTuple->t_data)))
			break;

		/*
		 * When first_call is true (and thus, skip is initially false) we'll
		 * return the first tuple we find.  But on later passes, heapTuple
		 * will initially be pointing to the tuple we returned last time.
		 * Returning it again would be incorrect (and would loop forever),
		 * so we skip it and return the next match we find.
		 */
		if (!skip)
		{
			/* If it's visible per the snapshot, we must return it */
			valid = HeapTupleSatisfiesVisibility(heapTuple, snapshot, buffer);
			CheckForSerializableConflictOut(valid, relation, heapTuple,
											buffer, snapshot);
			if (valid)
			{
				ItemPointerSetOffsetNumber(tid, offnum);
				PredicateLockTuple(relation, heapTuple, snapshot);
				if (all_dead)
					*all_dead = false;
				return true;
			}
		}
		skip = false;

		/*
		 * If we can't see it, maybe no one else can either.  At caller
		 * request, check whether all chain members are dead to all
		 * transactions.
		 */
		if (all_dead && *all_dead &&
			HeapTupleSatisfiesVacuum(heapTuple->t_data, RecentGlobalXmin,
									 buffer) != HEAPTUPLE_DEAD)
			*all_dead = false;

		/*
		 * Check to see if HOT chain continues past this tuple; if so fetch
		 * the next offnum and loop around.
		 */
		if (HeapTupleIsHotUpdated(heapTuple))
		{
			Assert(ItemPointerGetBlockNumber(&heapTuple->t_data->t_ctid) ==
				   ItemPointerGetBlockNumber(tid));
			offnum = ItemPointerGetOffsetNumber(&heapTuple->t_data->t_ctid);
			at_chain_start = false;
			prev_xmax = HeapTupleHeaderGetXmax(heapTuple->t_data);
		}
		else
			break;				/* end of chain */
	}

	return false;
}
/*
 *	heap_hot_search		- search HOT chain for tuple satisfying snapshot
 *
 * This has the same API as heap_hot_search_buffer, except that the caller
 * does not provide the buffer containing the page, rather we access it
 * locally.
 */
bool
heap_hot_search(ItemPointer tid, Relation relation, Snapshot snapshot,
				bool *all_dead)
{
	bool		result;
	Buffer		buffer;
	HeapTupleData heapTuple;

	buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
	LockBuffer(buffer, BUFFER_LOCK_SHARE);
	result = heap_hot_search_buffer(tid, relation, buffer, snapshot,
									&heapTuple, all_dead, true);
	LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
	ReleaseBuffer(buffer);
	return result;
}
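
/*
 * Editor's note: a sketch of typical heap_hot_search usage, e.g. resolving
 * a TID taken from an index entry (illustrative; the surrounding variables
 * are assumptions):
 *
 *		ItemPointerData tid = index_tid;	(root of a possible HOT chain)
 *		bool		all_dead;
 *
 *		if (heap_hot_search(&tid, rel, snapshot, &all_dead))
 *			... tid now points at the visible chain member ...
 *		else if (all_dead)
 *			... every chain member is vacuumable ...
 */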
/*
 *	heap_get_latest_tid -  get the latest tid of a specified tuple
 *
 * Actually, this gets the latest version that is visible according to
 * the passed snapshot.  You can pass SnapshotDirty to get the very latest,
 * possibly uncommitted version.
 *
 * *tid is both an input and an output parameter: it is updated to
 * show the latest version of the row.  Note that it will not be changed
 * if no version of the row passes the snapshot test.
 */
void
heap_get_latest_tid(Relation relation,
					Snapshot snapshot,
					ItemPointer tid)
{
	BlockNumber blk;
	ItemPointerData ctid;
	TransactionId priorXmax;

	/* this is to avoid Assert failures on bad input */
	if (!ItemPointerIsValid(tid))
		return;

	/*
	 * Since this can be called with user-supplied TID, don't trust the input
	 * too much.  (RelationGetNumberOfBlocks is an expensive check, so we
	 * don't check t_ctid links again this way.  Note that it would not do to
	 * call it just once and save the result, either.)
	 */
	blk = ItemPointerGetBlockNumber(tid);
	if (blk >= RelationGetNumberOfBlocks(relation))
		elog(ERROR, "block number %u is out of range for relation \"%s\"",
			 blk, RelationGetRelationName(relation));

	/*
	 * Loop to chase down t_ctid links.  At top of loop, ctid is the tuple we
	 * need to examine, and *tid is the TID we will return if ctid turns out
	 * to be bogus.
	 *
	 * Note that we will loop until we reach the end of the t_ctid chain.
	 * Depending on the snapshot passed, there might be at most one visible
	 * version of the row, but we don't try to optimize for that.
	 */
	ctid = *tid;
	priorXmax = InvalidTransactionId;	/* cannot check first XMIN */
	for (;;)
	{
		Buffer		buffer;
		Page		page;
		OffsetNumber offnum;
		ItemId		lp;
		HeapTupleData tp;
		bool		valid;

		/*
		 * Read, pin, and lock the page.
		 */
		buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&ctid));
		LockBuffer(buffer, BUFFER_LOCK_SHARE);
		page = BufferGetPage(buffer);

		/*
		 * Check for bogus item number.  This is not treated as an error
		 * condition because it can happen while following a t_ctid link. We
		 * just assume that the prior tid is OK and return it unchanged.
		 */
		offnum = ItemPointerGetOffsetNumber(&ctid);
		if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
		{
			UnlockReleaseBuffer(buffer);
			break;
		}
		lp = PageGetItemId(page, offnum);
		if (!ItemIdIsNormal(lp))
		{
			UnlockReleaseBuffer(buffer);
			break;
		}

		/* OK to access the tuple */
		tp.t_self = ctid;
		tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
		tp.t_len = ItemIdGetLength(lp);

		/*
		 * After following a t_ctid link, we might arrive at an unrelated
		 * tuple.  Check for XMIN match.
		 */
		if (TransactionIdIsValid(priorXmax) &&
			!TransactionIdEquals(priorXmax, HeapTupleHeaderGetXmin(tp.t_data)))
		{
			UnlockReleaseBuffer(buffer);
			break;
		}

		/*
		 * Check time qualification of tuple; if visible, set it as the new
		 * result candidate.
		 */
		valid = HeapTupleSatisfiesVisibility(&tp, snapshot, buffer);
		CheckForSerializableConflictOut(valid, relation, &tp, buffer, snapshot);
		if (valid)
			*tid = ctid;

		/*
		 * If there's a valid t_ctid link, follow it, else we're done.
		 */
		if ((tp.t_data->t_infomask & (HEAP_XMAX_INVALID | HEAP_IS_LOCKED)) ||
			ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid))
		{
			UnlockReleaseBuffer(buffer);
			break;
		}

		ctid = tp.t_data->t_ctid;
		priorXmax = HeapTupleHeaderGetXmax(tp.t_data);
		UnlockReleaseBuffer(buffer);
	}							/* end of loop */
}
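
/*
 * Editor's note: an illustrative call (not from the original file) that
 * chases a row to its newest visible version; the starting TID is an
 * assumption:
 *
 *		ItemPointerData tid = start_tid;
 *
 *		heap_get_latest_tid(rel, GetActiveSnapshot(), &tid);
 *		(tid is unchanged if no version passed the snapshot test)
 *
 * Passing SnapshotDirty instead would surface the very latest, possibly
 * uncommitted, version, as the comment above notes.
 */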
/*
 * UpdateXmaxHintBits - update tuple hint bits after xmax transaction ends
 *
 * This is called after we have waited for the XMAX transaction to terminate.
 * If the transaction aborted, we guarantee the XMAX_INVALID hint bit will
 * be set on exit.  If the transaction committed, we set the XMAX_COMMITTED
 * hint bit if possible --- but beware that that may not yet be possible,
 * if the transaction committed asynchronously.  Hence callers should look
 * only at XMAX_INVALID.
 */
static void
UpdateXmaxHintBits(HeapTupleHeader tuple, Buffer buffer, TransactionId xid)
{
	Assert(TransactionIdEquals(HeapTupleHeaderGetXmax(tuple), xid));

	if (!(tuple->t_infomask & (HEAP_XMAX_COMMITTED | HEAP_XMAX_INVALID)))
	{
		if (TransactionIdDidCommit(xid))
			HeapTupleSetHintBits(tuple, buffer, HEAP_XMAX_COMMITTED,
								 xid);
		else
			HeapTupleSetHintBits(tuple, buffer, HEAP_XMAX_INVALID,
								 InvalidTransactionId);
	}
}
/*
 * GetBulkInsertState - prepare status object for a bulk insert
 */
BulkInsertState
GetBulkInsertState(void)
{
	BulkInsertState bistate;

	bistate = (BulkInsertState) palloc(sizeof(BulkInsertStateData));
	bistate->strategy = GetAccessStrategy(BAS_BULKWRITE);
	bistate->current_buf = InvalidBuffer;
	return bistate;
}

/*
 * FreeBulkInsertState - clean up after finishing a bulk insert
 */
void
FreeBulkInsertState(BulkInsertState bistate)
{
	if (bistate->current_buf != InvalidBuffer)
		ReleaseBuffer(bistate->current_buf);
	FreeAccessStrategy(bistate->strategy);
	pfree(bistate);
}
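
/*
 * Editor's note: a minimal bulk-load sketch tying these two routines to
 * heap_insert below (illustrative; the tuple source next_tuple_to_load()
 * is an assumption):
 *
 *		BulkInsertState bistate = GetBulkInsertState();
 *		HeapTuple	tup;
 *
 *		while ((tup = next_tuple_to_load()) != NULL)
 *			heap_insert(rel, tup, GetCurrentCommandId(true), 0, bistate);
 *		FreeBulkInsertState(bistate);
 *
 * The BulkInsertState keeps the current target buffer pinned and supplies a
 * BAS_BULKWRITE strategy, so repeated inserts avoid re-finding the target
 * page and avoid flooding shared buffers.
 */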
/*
 *	heap_insert		- insert tuple into a heap
 *
 * The new tuple is stamped with current transaction ID and the specified
 * command ID.
 *
 * If the HEAP_INSERT_SKIP_WAL option is specified, the new tuple is not
 * logged in WAL, even for a non-temp relation.  Safe usage of this behavior
 * requires that we arrange that all new tuples go into new pages not
 * containing any tuples from other transactions, and that the relation gets
 * fsync'd before commit.  (See also heap_sync() comments)
 *
 * The HEAP_INSERT_SKIP_FSM option is passed directly to
 * RelationGetBufferForTuple, which see for more info.
 *
 * Note that these options will be applied when inserting into the heap's
 * TOAST table, too, if the tuple requires any out-of-line data.
 *
 * The BulkInsertState object (if any; bistate can be NULL for default
 * behavior) is also just passed through to RelationGetBufferForTuple.
 *
 * The return value is the OID assigned to the tuple (either here or by the
 * caller), or InvalidOid if no OID.  The header fields of *tup are updated
 * to match the stored tuple; in particular tup->t_self receives the actual
 * TID where the tuple was stored.  But note that any toasting of fields
 * within the tuple data is NOT reflected into *tup.
 */
Oid
heap_insert(Relation relation, HeapTuple tup, CommandId cid,
			int options, BulkInsertState bistate)
{
	TransactionId xid = GetCurrentTransactionId();
	HeapTuple	heaptup;
	Buffer		buffer;
	Buffer		vmbuffer = InvalidBuffer;
	bool		all_visible_cleared = false;

	if (relation->rd_rel->relhasoids)
	{
#ifdef NOT_USED
		/* this is redundant with an Assert in HeapTupleSetOid */
		Assert(tup->t_data->t_infomask & HEAP_HASOID);
#endif

		/*
		 * If the object id of this tuple has already been assigned, trust the
		 * caller.  There are a couple of ways this can happen.  At initial db
		 * creation, the backend program sets oids for tuples. When we define
		 * an index, we set the oid.  Finally, in the future, we may allow
		 * users to set their own object ids in order to support a persistent
		 * object store (objects need to contain pointers to one another).
		 */
		if (!OidIsValid(HeapTupleGetOid(tup)))
			HeapTupleSetOid(tup, GetNewOid(relation));
	}
	else
	{
		/* check there is no space for an OID */
		Assert(!(tup->t_data->t_infomask & HEAP_HASOID));
	}

	tup->t_data->t_infomask &= ~(HEAP_XACT_MASK);
	tup->t_data->t_infomask2 &= ~(HEAP2_XACT_MASK);
	tup->t_data->t_infomask |= HEAP_XMAX_INVALID;
	HeapTupleHeaderSetXmin(tup->t_data, xid);
	HeapTupleHeaderSetCmin(tup->t_data, cid);
	HeapTupleHeaderSetXmax(tup->t_data, 0);		/* for cleanliness */
	tup->t_tableOid = RelationGetRelid(relation);

	/*
	 * If the new tuple is too big for storage or contains already toasted
	 * out-of-line attributes from some other relation, invoke the toaster.
	 *
	 * Note: below this point, heaptup is the data we actually intend to store
	 * into the relation; tup is the caller's original untoasted data.
	 */
	if (relation->rd_rel->relkind != RELKIND_RELATION)
	{
		/* toast table entries should never be recursively toasted */
		Assert(!HeapTupleHasExternal(tup));
		heaptup = tup;
	}
	else if (HeapTupleHasExternal(tup) || tup->t_len > TOAST_TUPLE_THRESHOLD)
		heaptup = toast_insert_or_update(relation, tup, NULL, options);
	else
		heaptup = tup;

	/*
	 * Find buffer to insert this tuple into.  If the page is all visible,
	 * this will also pin the requisite visibility map page.
	 */
	buffer = RelationGetBufferForTuple(relation, heaptup->t_len,
									   InvalidBuffer, options, bistate,
									   &vmbuffer);

	/*
	 * We're about to do the actual insert -- check for conflict at the
	 * relation or buffer level first, to avoid possibly having to roll back
	 * work we've just done.
	 */
	CheckForSerializableConflictIn(relation, NULL, buffer);

	/* NO EREPORT(ERROR) from here till changes are logged */
	START_CRIT_SECTION();

	RelationPutHeapTuple(relation, buffer, heaptup);

	if (PageIsAllVisible(BufferGetPage(buffer)))
	{
		all_visible_cleared = true;
		PageClearAllVisible(BufferGetPage(buffer));
		visibilitymap_clear(relation,
							ItemPointerGetBlockNumber(&(heaptup->t_self)),
							vmbuffer);
	}

	/*
	 * XXX Should we set PageSetPrunable on this page ?
	 *
	 * The inserting transaction may eventually abort thus making this tuple
	 * DEAD and hence available for pruning. Though we don't want to optimize
	 * for aborts, if no other tuple in this page is UPDATEd/DELETEd, the
	 * aborted tuple will never be pruned until next vacuum is triggered.
	 *
	 * If you do add PageSetPrunable here, add it in heap_xlog_insert too.
	 */

	MarkBufferDirty(buffer);

	/* XLOG stuff */
	if (!(options & HEAP_INSERT_SKIP_WAL) && RelationNeedsWAL(relation))
	{
		xl_heap_insert xlrec;
		xl_heap_header xlhdr;
		XLogRecPtr	recptr;
		XLogRecData rdata[3];
		Page		page = BufferGetPage(buffer);
		uint8		info = XLOG_HEAP_INSERT;

		xlrec.all_visible_cleared = all_visible_cleared;
		xlrec.target.node = relation->rd_node;
		xlrec.target.tid = heaptup->t_self;
		rdata[0].data = (char *) &xlrec;
		rdata[0].len = SizeOfHeapInsert;
		rdata[0].buffer = InvalidBuffer;
		rdata[0].next = &(rdata[1]);

		xlhdr.t_infomask2 = heaptup->t_data->t_infomask2;
		xlhdr.t_infomask = heaptup->t_data->t_infomask;
		xlhdr.t_hoff = heaptup->t_data->t_hoff;

		/*
		 * note we mark rdata[1] as belonging to buffer; if XLogInsert decides
		 * to write the whole page to the xlog, we don't need to store
		 * xl_heap_header in the xlog.
		 */
		rdata[1].data = (char *) &xlhdr;
		rdata[1].len = SizeOfHeapHeader;
		rdata[1].buffer = buffer;
		rdata[1].buffer_std = true;
		rdata[1].next = &(rdata[2]);

		/* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */
		rdata[2].data = (char *) heaptup->t_data + offsetof(HeapTupleHeaderData, t_bits);
		rdata[2].len = heaptup->t_len - offsetof(HeapTupleHeaderData, t_bits);
		rdata[2].buffer = buffer;
		rdata[2].buffer_std = true;
		rdata[2].next = NULL;

		/*
		 * If this is the single and first tuple on page, we can reinit the
		 * page instead of restoring the whole thing.  Set flag, and hide
		 * buffer references from XLogInsert.
		 */
		if (ItemPointerGetOffsetNumber(&(heaptup->t_self)) == FirstOffsetNumber &&
			PageGetMaxOffsetNumber(page) == FirstOffsetNumber)
		{
			info |= XLOG_HEAP_INIT_PAGE;
			rdata[1].buffer = rdata[2].buffer = InvalidBuffer;
		}

		recptr = XLogInsert(RM_HEAP_ID, info, rdata);

		PageSetLSN(page, recptr);
		PageSetTLI(page, ThisTimeLineID);
	}

	END_CRIT_SECTION();

	UnlockReleaseBuffer(buffer);
	if (vmbuffer != InvalidBuffer)
		ReleaseBuffer(vmbuffer);

	/*
	 * If tuple is cachable, mark it for invalidation from the caches in case
	 * we abort.  Note it is OK to do this after releasing the buffer, because
	 * the heaptup data structure is all in local memory, not in the shared
	 * buffer.
	 */
	CacheInvalidateHeapTuple(relation, heaptup);

	pgstat_count_heap_insert(relation);

	/*
	 * If heaptup is a private copy, release it.  Don't forget to copy t_self
	 * back to the caller's image, too.
	 */
	if (heaptup != tup)
	{
		tup->t_self = heaptup->t_self;
		heap_freetuple(heaptup);
	}

	return HeapTupleGetOid(tup);
}
/*
 *	simple_heap_insert - insert a tuple
 *
 * Currently, this routine differs from heap_insert only in supplying
 * a default command ID and not allowing access to the speedup options.
 *
 * This should be used rather than using heap_insert directly in most places
 * where we are modifying system catalogs.
 */
Oid
simple_heap_insert(Relation relation, HeapTuple tup)
{
	return heap_insert(relation, tup, GetCurrentCommandId(true), 0, NULL);
}
/*
 *	heap_delete - delete a tuple
 *
 * NB: do not call this directly unless you are prepared to deal with
 * concurrent-update conditions.  Use simple_heap_delete instead.
 *
 *	relation - table to be modified (caller must hold suitable lock)
 *	tid - TID of tuple to be deleted
 *	ctid - output parameter, used only for failure case (see below)
 *	update_xmax - output parameter, used only for failure case (see below)
 *	cid - delete command ID (used for visibility test, and stored into
 *		cmax if successful)
 *	crosscheck - if not InvalidSnapshot, also check tuple against this
 *	wait - true if should wait for any conflicting update to commit/abort
 *
 * Normal, successful return value is HeapTupleMayBeUpdated, which
 * actually means we did delete it.  Failure return codes are
 * HeapTupleSelfUpdated, HeapTupleUpdated, or HeapTupleBeingUpdated
 * (the last only possible if wait == false).
 *
 * In the failure cases, the routine returns the tuple's t_ctid and t_xmax.
 * If t_ctid is the same as tid, the tuple was deleted; if different, the
 * tuple was updated, and t_ctid is the location of the replacement tuple.
 * (t_xmax is needed to verify that the replacement tuple matches.)
 */
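
/*
 * Editor's note: before the definition that follows, a sketch of how a
 * caller prepared for concurrent updates might drive heap_delete and
 * dispatch on its result (illustrative only; simple_heap_delete wraps a
 * similar check for callers that expect no concurrency):
 *
 *		ItemPointerData ctid;
 *		TransactionId update_xmax;
 *		HTSU_Result result;
 *
 *		result = heap_delete(rel, &tuple_tid, &ctid, &update_xmax,
 *							 GetCurrentCommandId(true), InvalidSnapshot,
 *							 true);		(wait for conflicting updaters)
 *		switch (result)
 *		{
 *			case HeapTupleMayBeUpdated:
 *				break;			(success: the tuple is deleted)
 *			case HeapTupleSelfUpdated:
 *			case HeapTupleUpdated:
 *				... follow ctid/update_xmax to the replacement tuple ...
 *				break;
 *			default:
 *				elog(ERROR, "unexpected heap_delete result");
 *		}
 */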
2112 heap_delete(Relation relation, ItemPointer tid,
2113 ItemPointer ctid, TransactionId *update_xmax,
2114 CommandId cid, Snapshot crosscheck, bool wait)
2117 TransactionId xid = GetCurrentTransactionId();
2123 Buffer vmbuffer = InvalidBuffer;
2124 bool have_tuple_lock = false;
2126 bool all_visible_cleared = false;
2128 Assert(ItemPointerIsValid(tid));
2130 block = ItemPointerGetBlockNumber(tid);
2131 buffer = ReadBuffer(relation, block);
2132 page = BufferGetPage(buffer);
2135 * Before locking the buffer, pin the visibility map page if it appears
2136 * to be necessary. Since we haven't got the lock yet, someone else might
2137 * be in the middle of changing this, so we'll need to recheck after
2140 if (PageIsAllVisible(page))
2141 visibilitymap_pin(relation, block, &vmbuffer);
2143 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
2146 * If we didn't pin the visibility map page and the page has become all
2147 * visible while we were busy locking the buffer, we'll have to unlock and
2148 * re-lock, to avoid holding the buffer lock across an I/O. That's a bit
2149 * unfortunate, but hopefully shouldn't happen often.
2151 if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
2153 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2154 visibilitymap_pin(relation, block, &vmbuffer);
2155 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
2158 lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
2159 Assert(ItemIdIsNormal(lp));
2161 tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
2162 tp.t_len = ItemIdGetLength(lp);
2166 result = HeapTupleSatisfiesUpdate(tp.t_data, cid, buffer);
2168 if (result == HeapTupleInvisible)
2170 UnlockReleaseBuffer(buffer);
2171 elog(ERROR, "attempted to delete invisible tuple");
2173 else if (result == HeapTupleBeingUpdated && wait)
2175 TransactionId xwait;
2178 /* must copy state data before unlocking buffer */
2179 xwait = HeapTupleHeaderGetXmax(tp.t_data);
2180 infomask = tp.t_data->t_infomask;
2182 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2185 * Acquire tuple lock to establish our priority for the tuple (see
2186 * heap_lock_tuple). LockTuple will release us when we are
2187 * next-in-line for the tuple.
2189 * If we are forced to "start over" below, we keep the tuple lock;
2190 * this arranges that we stay at the head of the line while rechecking
2193 if (!have_tuple_lock)
2195 LockTuple(relation, &(tp.t_self), ExclusiveLock);
2196 have_tuple_lock = true;
2200 * Sleep until concurrent transaction ends. Note that we don't care
2201 * if the locker has an exclusive or shared lock, because we need
2205 if (infomask & HEAP_XMAX_IS_MULTI)
2207 /* wait for multixact */
2208 MultiXactIdWait((MultiXactId) xwait);
2209 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
2212 * If xwait had just locked the tuple then some other xact could
2213 * update this tuple before we get to this point. Check for xmax
2214 * change, and start over if so.
2216 if (!(tp.t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
2217 !TransactionIdEquals(HeapTupleHeaderGetXmax(tp.t_data),
2218 xwait))
2219 goto l1;
2222 * You might think the multixact is necessarily done here, but not
2223 * so: it could have surviving members, namely our own xact or
2224 * other subxacts of this backend. It is legal for us to delete
2225 * the tuple in either case, however (the latter case is
2226 * essentially a situation of upgrading our former shared lock to
2227 * exclusive). We don't bother changing the on-disk hint bits
2228 * since we are about to overwrite the xmax altogether.
2231 else
2233 /* wait for regular transaction to end */
2234 XactLockTableWait(xwait);
2235 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
2238 * xwait is done, but if xwait had just locked the tuple then some
2239 * other xact could update this tuple before we get to this point.
2240 * Check for xmax change, and start over if so.
2242 if ((tp.t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
2243 !TransactionIdEquals(HeapTupleHeaderGetXmax(tp.t_data),
2244 xwait))
2245 goto l1;
2247 /* Otherwise check if it committed or aborted */
2248 UpdateXmaxHintBits(tp.t_data, buffer, xwait);
2252 * We may overwrite if previous xmax aborted, or if it committed but
2253 * only locked the tuple without updating it.
2255 if (tp.t_data->t_infomask & (HEAP_XMAX_INVALID |
2256 HEAP_IS_LOCKED))
2257 result = HeapTupleMayBeUpdated;
2258 else
2259 result = HeapTupleUpdated;
2262 if (crosscheck != InvalidSnapshot && result == HeapTupleMayBeUpdated)
2264 /* Perform additional check for transaction-snapshot mode RI updates */
2265 if (!HeapTupleSatisfiesVisibility(&tp, crosscheck, buffer))
2266 result = HeapTupleUpdated;
2269 if (result != HeapTupleMayBeUpdated)
2270 {
2271 Assert(result == HeapTupleSelfUpdated ||
2272 result == HeapTupleUpdated ||
2273 result == HeapTupleBeingUpdated);
2274 Assert(!(tp.t_data->t_infomask & HEAP_XMAX_INVALID));
2275 *ctid = tp.t_data->t_ctid;
2276 *update_xmax = HeapTupleHeaderGetXmax(tp.t_data);
2277 UnlockReleaseBuffer(buffer);
2278 if (have_tuple_lock)
2279 UnlockTuple(relation, &(tp.t_self), ExclusiveLock);
2280 if (vmbuffer != InvalidBuffer)
2281 ReleaseBuffer(vmbuffer);
2282 return result;
2283 }
2286 * We're about to do the actual delete -- check for conflict first, to
2287 * avoid possibly having to roll back work we've just done.
2289 CheckForSerializableConflictIn(relation, &tp, buffer);
2291 /* replace cid with a combo cid if necessary */
2292 HeapTupleHeaderAdjustCmax(tp.t_data, &cid, &iscombo);
2294 START_CRIT_SECTION();
2297 * If this transaction commits, the tuple will become DEAD sooner or
2298 * later. Set flag that this page is a candidate for pruning once our xid
2299 * falls below the OldestXmin horizon. If the transaction finally aborts,
2300 * the subsequent page pruning will be a no-op and the hint will be
2301 * cleared.
2303 PageSetPrunable(page, xid);
2305 if (PageIsAllVisible(page))
2306 {
2307 all_visible_cleared = true;
2308 PageClearAllVisible(page);
2309 visibilitymap_clear(relation, BufferGetBlockNumber(buffer),
2310 vmbuffer);
2311 }
2313 /* store transaction information of xact deleting the tuple */
2314 tp.t_data->t_infomask &= ~(HEAP_XMAX_COMMITTED |
2315 HEAP_XMAX_INVALID |
2316 HEAP_XMAX_IS_MULTI |
2317 HEAP_IS_LOCKED |
2318 HEAP_MOVED);
2319 HeapTupleHeaderClearHotUpdated(tp.t_data);
2320 HeapTupleHeaderSetXmax(tp.t_data, xid);
2321 HeapTupleHeaderSetCmax(tp.t_data, cid, iscombo);
2322 /* Make sure there is no forward chain link in t_ctid */
2323 tp.t_data->t_ctid = tp.t_self;
2325 MarkBufferDirty(buffer);
2328 if (RelationNeedsWAL(relation))
2329 {
2330 xl_heap_delete xlrec;
2331 XLogRecPtr recptr;
2332 XLogRecData rdata[2];
2334 xlrec.all_visible_cleared = all_visible_cleared;
2335 xlrec.target.node = relation->rd_node;
2336 xlrec.target.tid = tp.t_self;
2337 rdata[0].data = (char *) &xlrec;
2338 rdata[0].len = SizeOfHeapDelete;
2339 rdata[0].buffer = InvalidBuffer;
2340 rdata[0].next = &(rdata[1]);
2342 rdata[1].data = NULL;
2343 rdata[1].len = 0;
2344 rdata[1].buffer = buffer;
2345 rdata[1].buffer_std = true;
2346 rdata[1].next = NULL;
2348 recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE, rdata);
2350 PageSetLSN(page, recptr);
2351 PageSetTLI(page, ThisTimeLineID);
2352 }
2354 END_CRIT_SECTION();
2356 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2358 if (vmbuffer != InvalidBuffer)
2359 ReleaseBuffer(vmbuffer);
2362 * If the tuple has toasted out-of-line attributes, we need to delete
2363 * those items too. We have to do this before releasing the buffer
2364 * because we need to look at the contents of the tuple, but it's OK to
2365 * release the content lock on the buffer first.
2367 if (relation->rd_rel->relkind != RELKIND_RELATION)
2369 /* toast table entries should never be recursively toasted */
2370 Assert(!HeapTupleHasExternal(&tp));
2372 else if (HeapTupleHasExternal(&tp))
2373 toast_delete(relation, &tp);
2376 * Mark tuple for invalidation from system caches at next command
2377 * boundary. We have to do this before releasing the buffer because we
2378 * need to look at the contents of the tuple.
2380 CacheInvalidateHeapTuple(relation, &tp);
2382 /* Now we can release the buffer */
2383 ReleaseBuffer(buffer);
2386 * Release the lmgr tuple lock, if we had it.
2388 if (have_tuple_lock)
2389 UnlockTuple(relation, &(tp.t_self), ExclusiveLock);
2391 pgstat_count_heap_delete(relation);
2393 return HeapTupleMayBeUpdated;
2394 }
2397 * simple_heap_delete - delete a tuple
2399 * This routine may be used to delete a tuple when concurrent updates of
2400 * the target tuple are not expected (for example, because we have a lock
2401 * on the relation associated with the tuple). Any failure is reported
2402 * via ereport().
2404 void
2405 simple_heap_delete(Relation relation, ItemPointer tid)
2406 {
2407 HTSU_Result result;
2408 ItemPointerData update_ctid;
2409 TransactionId update_xmax;
2411 result = heap_delete(relation, tid,
2412 &update_ctid, &update_xmax,
2413 GetCurrentCommandId(true), InvalidSnapshot,
2414 true /* wait for commit */ );
2415 switch (result)
2416 {
2417 case HeapTupleSelfUpdated:
2418 /* Tuple was already updated in current command? */
2419 elog(ERROR, "tuple already updated by self");
2420 break;
2422 case HeapTupleMayBeUpdated:
2423 /* done successfully */
2424 break;
2426 case HeapTupleUpdated:
2427 elog(ERROR, "tuple concurrently updated");
2428 break;
2430 default:
2431 elog(ERROR, "unrecognized heap_delete status: %u", result);
2432 break;
2433 }
2434 }
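/*
 * Usage sketch (added commentary; not part of the original file): deleting
 * one tuple by TID with simple_heap_delete, for a caller that already holds
 * an adequate lock on the relation.  The function name and the
 * HEAPAM_USAGE_EXAMPLES guard are hypothetical; heap_open, heap_close and
 * simple_heap_delete are the real entry points shown above.
 */
#ifdef HEAPAM_USAGE_EXAMPLES
static void
example_delete_tuple_by_tid(Oid relid, ItemPointer tid)
{
	/* RowExclusiveLock is the conventional lock level for DML */
	Relation	rel = heap_open(relid, RowExclusiveLock);

	/* elogs on any concurrent update, so only use when none is expected */
	simple_heap_delete(rel, tid);

	heap_close(rel, RowExclusiveLock);
}
#endif   /* HEAPAM_USAGE_EXAMPLES */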
2437 * heap_update - replace a tuple
2439 * NB: do not call this directly unless you are prepared to deal with
2440 * concurrent-update conditions. Use simple_heap_update instead.
2442 * relation - table to be modified (caller must hold suitable lock)
2443 * otid - TID of old tuple to be replaced
2444 * newtup - newly constructed tuple data to store
2445 * ctid - output parameter, used only for failure case (see below)
2446 * update_xmax - output parameter, used only for failure case (see below)
2447 * cid - update command ID (used for visibility test, and stored into
2448 * cmax/cmin if successful)
2449 * crosscheck - if not InvalidSnapshot, also check old tuple against this
2450 * wait - true if should wait for any conflicting update to commit/abort
2452 * Normal, successful return value is HeapTupleMayBeUpdated, which
2453 * actually means we *did* update it. Failure return codes are
2454 * HeapTupleSelfUpdated, HeapTupleUpdated, or HeapTupleBeingUpdated
2455 * (the last only possible if wait == false).
2457 * On success, the header fields of *newtup are updated to match the new
2458 * stored tuple; in particular, newtup->t_self is set to the TID where the
2459 * new tuple was inserted, and its HEAP_ONLY_TUPLE flag is set iff a HOT
2460 * update was done. However, any TOAST changes in the new tuple's
2461 * data are not reflected into *newtup.
2463 * In the failure cases, the routine returns the tuple's t_ctid and t_xmax.
2464 * If t_ctid is the same as otid, the tuple was deleted; if different, the
2465 * tuple was updated, and t_ctid is the location of the replacement tuple.
2466 * (t_xmax is needed to verify that the replacement tuple matches.)
2468 HTSU_Result
2469 heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
2470 ItemPointer ctid, TransactionId *update_xmax,
2471 CommandId cid, Snapshot crosscheck, bool wait)
2472 {
2473 HTSU_Result result;
2474 TransactionId xid = GetCurrentTransactionId();
2475 Bitmapset *hot_attrs;
2476 ItemId lp;
2477 HeapTupleData oldtup;
2478 HeapTuple heaptup;
2479 Page page;
2480 BlockNumber block;
2481 Buffer buffer,
2482 newbuf,
2483 vmbuffer = InvalidBuffer,
2484 vmbuffer_new = InvalidBuffer;
2485 bool need_toast,
2486 already_marked;
2487 Size newtupsize,
2488 pagefree;
2489 bool have_tuple_lock = false;
2490 bool iscombo;
2491 bool use_hot_update = false;
2492 bool all_visible_cleared = false;
2493 bool all_visible_cleared_new = false;
2495 Assert(ItemPointerIsValid(otid));
2498 * Fetch the list of attributes to be checked for HOT update. This is
2499 * wasted effort if we fail to update or have to put the new tuple on a
2500 * different page. But we must compute the list before obtaining buffer
2501 * lock --- in the worst case, if we are doing an update on one of the
2502 * relevant system catalogs, we could deadlock if we try to fetch the list
2503 * later. In any case, the relcache caches the data so this is usually
2504 * pretty cheap.
2506 * Note that we get a copy here, so we need not worry about relcache flush
2507 * happening midway through.
2509 hot_attrs = RelationGetIndexAttrBitmap(relation);
2511 block = ItemPointerGetBlockNumber(otid);
2512 buffer = ReadBuffer(relation, block);
2513 page = BufferGetPage(buffer);
2516 * Before locking the buffer, pin the visibility map page if it appears
2517 * to be necessary. Since we haven't got the lock yet, someone else might
2518 * be in the middle of changing this, so we'll need to recheck after
2519 * we have the lock.
2521 if (PageIsAllVisible(page))
2522 visibilitymap_pin(relation, block, &vmbuffer);
2524 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
2526 lp = PageGetItemId(page, ItemPointerGetOffsetNumber(otid));
2527 Assert(ItemIdIsNormal(lp));
2529 oldtup.t_data = (HeapTupleHeader) PageGetItem(page, lp);
2530 oldtup.t_len = ItemIdGetLength(lp);
2531 oldtup.t_self = *otid;
2534 * Note: beyond this point, use oldtup not otid to refer to old tuple.
2535 * otid may very well point at newtup->t_self, which we will overwrite
2536 * with the new tuple's location, so there's great risk of confusion if we
2537 * use otid anymore.
2540 l2:
2541 result = HeapTupleSatisfiesUpdate(oldtup.t_data, cid, buffer);
2543 if (result == HeapTupleInvisible)
2544 {
2545 UnlockReleaseBuffer(buffer);
2546 elog(ERROR, "attempted to update invisible tuple");
2547 }
2548 else if (result == HeapTupleBeingUpdated && wait)
2549 {
2550 TransactionId xwait;
2551 uint16 infomask;
2553 /* must copy state data before unlocking buffer */
2554 xwait = HeapTupleHeaderGetXmax(oldtup.t_data);
2555 infomask = oldtup.t_data->t_infomask;
2557 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2560 * Acquire tuple lock to establish our priority for the tuple (see
2561 * heap_lock_tuple). LockTuple will release us when we are
2562 * next-in-line for the tuple.
2564 * If we are forced to "start over" below, we keep the tuple lock;
2565 * this arranges that we stay at the head of the line while rechecking
2566 * tuple state.
2568 if (!have_tuple_lock)
2569 {
2570 LockTuple(relation, &(oldtup.t_self), ExclusiveLock);
2571 have_tuple_lock = true;
2572 }
2575 * Sleep until concurrent transaction ends. Note that we don't care
2576 * if the locker has an exclusive or shared lock, because we need
2577 * exclusive.
2580 if (infomask & HEAP_XMAX_IS_MULTI)
2582 /* wait for multixact */
2583 MultiXactIdWait((MultiXactId) xwait);
2584 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
2587 * If xwait had just locked the tuple then some other xact could
2588 * update this tuple before we get to this point. Check for xmax
2589 * change, and start over if so.
2591 if (!(oldtup.t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
2592 !TransactionIdEquals(HeapTupleHeaderGetXmax(oldtup.t_data),
2593 xwait))
2594 goto l2;
2597 * You might think the multixact is necessarily done here, but not
2598 * so: it could have surviving members, namely our own xact or
2599 * other subxacts of this backend. It is legal for us to update
2600 * the tuple in either case, however (the latter case is
2601 * essentially a situation of upgrading our former shared lock to
2602 * exclusive). We don't bother changing the on-disk hint bits
2603 * since we are about to overwrite the xmax altogether.
2606 else
2608 /* wait for regular transaction to end */
2609 XactLockTableWait(xwait);
2610 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
2613 * xwait is done, but if xwait had just locked the tuple then some
2614 * other xact could update this tuple before we get to this point.
2615 * Check for xmax change, and start over if so.
2617 if ((oldtup.t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
2618 !TransactionIdEquals(HeapTupleHeaderGetXmax(oldtup.t_data),
2619 xwait))
2620 goto l2;
2622 /* Otherwise check if it committed or aborted */
2623 UpdateXmaxHintBits(oldtup.t_data, buffer, xwait);
2627 * We may overwrite if previous xmax aborted, or if it committed but
2628 * only locked the tuple without updating it.
2630 if (oldtup.t_data->t_infomask & (HEAP_XMAX_INVALID |
2631 HEAP_IS_LOCKED))
2632 result = HeapTupleMayBeUpdated;
2633 else
2634 result = HeapTupleUpdated;
2637 if (crosscheck != InvalidSnapshot && result == HeapTupleMayBeUpdated)
2639 /* Perform additional check for transaction-snapshot mode RI updates */
2640 if (!HeapTupleSatisfiesVisibility(&oldtup, crosscheck, buffer))
2641 result = HeapTupleUpdated;
2644 if (result != HeapTupleMayBeUpdated)
2645 {
2646 Assert(result == HeapTupleSelfUpdated ||
2647 result == HeapTupleUpdated ||
2648 result == HeapTupleBeingUpdated);
2649 Assert(!(oldtup.t_data->t_infomask & HEAP_XMAX_INVALID));
2650 *ctid = oldtup.t_data->t_ctid;
2651 *update_xmax = HeapTupleHeaderGetXmax(oldtup.t_data);
2652 UnlockReleaseBuffer(buffer);
2653 if (have_tuple_lock)
2654 UnlockTuple(relation, &(oldtup.t_self), ExclusiveLock);
2655 if (vmbuffer != InvalidBuffer)
2656 ReleaseBuffer(vmbuffer);
2657 bms_free(hot_attrs);
2658 return result;
2659 }
2662 * If we didn't pin the visibility map page and the page has become all
2663 * visible while we were busy locking the buffer, or during some subsequent
2664 * window during which we had it unlocked, we'll have to unlock and
2665 * re-lock, to avoid holding the buffer lock across an I/O. That's a bit
2666 * unfortunate, but hopefully shouldn't happen often.
2668 if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
2669 {
2670 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2671 visibilitymap_pin(relation, block, &vmbuffer);
2672 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
2673 }
2676 * We're about to do the actual update -- check for conflict first, to
2677 * avoid possibly having to roll back work we've just done.
2679 CheckForSerializableConflictIn(relation, &oldtup, buffer);
2681 /* Fill in OID and transaction status data for newtup */
2682 if (relation->rd_rel->relhasoids)
2683 {
2684 #ifdef NOT_USED
2685 /* this is redundant with an Assert in HeapTupleSetOid */
2686 Assert(newtup->t_data->t_infomask & HEAP_HASOID);
2687 #endif
2688 HeapTupleSetOid(newtup, HeapTupleGetOid(&oldtup));
2689 }
2690 else
2691 {
2692 /* check there is no space for an OID */
2693 Assert(!(newtup->t_data->t_infomask & HEAP_HASOID));
2694 }
2696 newtup->t_data->t_infomask &= ~(HEAP_XACT_MASK);
2697 newtup->t_data->t_infomask2 &= ~(HEAP2_XACT_MASK);
2698 newtup->t_data->t_infomask |= (HEAP_XMAX_INVALID | HEAP_UPDATED);
2699 HeapTupleHeaderSetXmin(newtup->t_data, xid);
2700 HeapTupleHeaderSetCmin(newtup->t_data, cid);
2701 HeapTupleHeaderSetXmax(newtup->t_data, 0); /* for cleanliness */
2702 newtup->t_tableOid = RelationGetRelid(relation);
2705 * Replace cid with a combo cid if necessary. Note that we already put
2706 * the plain cid into the new tuple.
2708 HeapTupleHeaderAdjustCmax(oldtup.t_data, &cid, &iscombo);
2711 * If the toaster needs to be activated, OR if the new tuple will not fit
2712 * on the same page as the old, then we need to release the content lock
2713 * (but not the pin!) on the old tuple's buffer while we are off doing
2714 * TOAST and/or table-file-extension work. We must mark the old tuple to
2715 * show that it's already being updated, else other processes may try to
2716 * update it themselves.
2718 * We need to invoke the toaster if there are already any out-of-line
2719 * toasted values present, or if the new tuple is over-threshold.
2721 if (relation->rd_rel->relkind != RELKIND_RELATION)
2723 /* toast table entries should never be recursively toasted */
2724 Assert(!HeapTupleHasExternal(&oldtup));
2725 Assert(!HeapTupleHasExternal(newtup));
2726 need_toast = false;
2727 }
2728 else
2729 need_toast = (HeapTupleHasExternal(&oldtup) ||
2730 HeapTupleHasExternal(newtup) ||
2731 newtup->t_len > TOAST_TUPLE_THRESHOLD);
2733 pagefree = PageGetHeapFreeSpace(page);
2735 newtupsize = MAXALIGN(newtup->t_len);
2737 if (need_toast || newtupsize > pagefree)
2738 {
2739 /* Clear obsolete visibility flags ... */
2740 oldtup.t_data->t_infomask &= ~(HEAP_XMAX_COMMITTED |
2741 HEAP_XMAX_INVALID |
2742 HEAP_XMAX_IS_MULTI |
2743 HEAP_IS_LOCKED |
2744 HEAP_MOVED);
2745 HeapTupleClearHotUpdated(&oldtup);
2746 /* ... and store info about transaction updating this tuple */
2747 HeapTupleHeaderSetXmax(oldtup.t_data, xid);
2748 HeapTupleHeaderSetCmax(oldtup.t_data, cid, iscombo);
2749 /* temporarily make it look not-updated */
2750 oldtup.t_data->t_ctid = oldtup.t_self;
2751 already_marked = true;
2752 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2755 * Let the toaster do its thing, if needed.
2757 * Note: below this point, heaptup is the data we actually intend to
2758 * store into the relation; newtup is the caller's original untoasted
2759 * data.
2761 if (need_toast)
2762 {
2763 /* Note we always use WAL and FSM during updates */
2764 heaptup = toast_insert_or_update(relation, newtup, &oldtup, 0);
2765 newtupsize = MAXALIGN(heaptup->t_len);
2766 }
2767 else
2768 heaptup = newtup;
2771 * Now, do we need a new page for the tuple, or not? This is a bit
2772 * tricky since someone else could have added tuples to the page while
2773 * we weren't looking. We have to recheck the available space after
2774 * reacquiring the buffer lock. But don't bother to do that if the
2775 * former amount of free space is still not enough; it's unlikely
2776 * there's more free now than before.
2778 * What's more, if we need to get a new page, we will need to acquire
2779 * buffer locks on both old and new pages. To avoid deadlock against
2780 * some other backend trying to get the same two locks in the other
2781 * order, we must be consistent about the order we get the locks in.
2782 * We use the rule "lock the lower-numbered page of the relation
2783 * first". To implement this, we must do RelationGetBufferForTuple
2784 * while not holding the lock on the old page, and we must rely on it
2785 * to get the locks on both pages in the correct order.
2787 if (newtupsize > pagefree)
2788 {
2789 /* Assume there's no chance to put heaptup on same page. */
2790 newbuf = RelationGetBufferForTuple(relation, heaptup->t_len,
2791 buffer, 0, NULL,
2792 &vmbuffer_new, &vmbuffer);
2793 }
2794 else
2795 {
2796 /* Re-acquire the lock on the old tuple's page. */
2797 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
2798 /* Re-check using the up-to-date free space */
2799 pagefree = PageGetHeapFreeSpace(page);
2800 if (newtupsize > pagefree)
2801 {
2803 * Rats, it doesn't fit anymore. We must now unlock and
2804 * relock to avoid deadlock. Fortunately, this path should
2805 * seldom be taken.
2807 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2808 newbuf = RelationGetBufferForTuple(relation, heaptup->t_len,
2809 buffer, 0, NULL,
2810 &vmbuffer_new, &vmbuffer);
2811 }
2812 else
2813 {
2814 /* OK, it fits here, so we're done. */
2815 newbuf = buffer;
2816 }
2817 }
2818 }
2819 else
2820 {
2821 /* No TOAST work needed, and it'll fit on same page */
2822 already_marked = false;
2823 newbuf = buffer;
2824 heaptup = newtup;
2825 }
2828 * We're about to create the new tuple -- check for conflict first, to
2829 * avoid possibly having to roll back work we've just done.
2831 * NOTE: For a tuple insert, we only need to check for table locks, since
2832 * predicate locking at the index level will cover ranges for anything
2833 * except a table scan. Therefore, only provide the relation.
2835 CheckForSerializableConflictIn(relation, NULL, InvalidBuffer);
2838 * At this point newbuf and buffer are both pinned and locked, and newbuf
2839 * has enough space for the new tuple. If they are the same buffer, only
2840 * one pin is held.
2843 if (newbuf == buffer)
2846 * Since the new tuple is going into the same page, we might be able
2847 * to do a HOT update. Check if any of the index columns have been
2848 * changed. If not, then HOT update is possible.
2850 if (HeapSatisfiesHOTUpdate(relation, hot_attrs, &oldtup, heaptup))
2851 use_hot_update = true;
2852 }
2853 else
2854 {
2855 /* Set a hint that the old page could use prune/defrag */
2856 PageSetFull(page);
2857 }
2859 /* NO EREPORT(ERROR) from here till changes are logged */
2860 START_CRIT_SECTION();
2863 * If this transaction commits, the old tuple will become DEAD sooner or
2864 * later. Set flag that this page is a candidate for pruning once our xid
2865 * falls below the OldestXmin horizon. If the transaction finally aborts,
2866 * the subsequent page pruning will be a no-op and the hint will be
2867 * cleared.
2869 * XXX Should we set hint on newbuf as well? If the transaction aborts,
2870 * there would be a prunable tuple in the newbuf; but for now we choose
2871 * not to optimize for aborts. Note that heap_xlog_update must be kept in
2872 * sync if this decision changes.
2874 PageSetPrunable(page, xid);
2876 if (use_hot_update)
2877 {
2878 /* Mark the old tuple as HOT-updated */
2879 HeapTupleSetHotUpdated(&oldtup);
2880 /* And mark the new tuple as heap-only */
2881 HeapTupleSetHeapOnly(heaptup);
2882 /* Mark the caller's copy too, in case different from heaptup */
2883 HeapTupleSetHeapOnly(newtup);
2885 else
2886 {
2887 /* Make sure tuples are correctly marked as not-HOT */
2888 HeapTupleClearHotUpdated(&oldtup);
2889 HeapTupleClearHeapOnly(heaptup);
2890 HeapTupleClearHeapOnly(newtup);
2893 RelationPutHeapTuple(relation, newbuf, heaptup); /* insert new tuple */
2895 if (!already_marked)
2897 /* Clear obsolete visibility flags ... */
2898 oldtup.t_data->t_infomask &= ~(HEAP_XMAX_COMMITTED |
2899 HEAP_XMAX_INVALID |
2900 HEAP_XMAX_IS_MULTI |
2901 HEAP_IS_LOCKED |
2902 HEAP_MOVED);
2903 /* ... and store info about transaction updating this tuple */
2904 HeapTupleHeaderSetXmax(oldtup.t_data, xid);
2905 HeapTupleHeaderSetCmax(oldtup.t_data, cid, iscombo);
2908 /* record address of new tuple in t_ctid of old one */
2909 oldtup.t_data->t_ctid = heaptup->t_self;
2911 /* clear PD_ALL_VISIBLE flags */
2912 if (PageIsAllVisible(BufferGetPage(buffer)))
2913 {
2914 all_visible_cleared = true;
2915 PageClearAllVisible(BufferGetPage(buffer));
2916 visibilitymap_clear(relation, BufferGetBlockNumber(buffer),
2917 vmbuffer);
2918 }
2919 if (newbuf != buffer && PageIsAllVisible(BufferGetPage(newbuf)))
2920 {
2921 all_visible_cleared_new = true;
2922 PageClearAllVisible(BufferGetPage(newbuf));
2923 visibilitymap_clear(relation, BufferGetBlockNumber(newbuf),
2924 vmbuffer_new);
2925 }
2927 if (newbuf != buffer)
2928 MarkBufferDirty(newbuf);
2929 MarkBufferDirty(buffer);
2932 if (RelationNeedsWAL(relation))
2933 {
2934 XLogRecPtr recptr = log_heap_update(relation, buffer, oldtup.t_self,
2935 newbuf, heaptup,
2936 all_visible_cleared,
2937 all_visible_cleared_new);
2939 if (newbuf != buffer)
2940 {
2941 PageSetLSN(BufferGetPage(newbuf), recptr);
2942 PageSetTLI(BufferGetPage(newbuf), ThisTimeLineID);
2943 }
2944 PageSetLSN(BufferGetPage(buffer), recptr);
2945 PageSetTLI(BufferGetPage(buffer), ThisTimeLineID);
2946 }
2948 END_CRIT_SECTION();
2950 if (newbuf != buffer)
2951 LockBuffer(newbuf, BUFFER_LOCK_UNLOCK);
2952 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2955 * Mark old tuple for invalidation from system caches at next command
2956 * boundary. We have to do this before releasing the buffer because we
2957 * need to look at the contents of the tuple.
2959 CacheInvalidateHeapTuple(relation, &oldtup);
2961 /* Now we can release the buffer(s) */
2962 if (newbuf != buffer)
2963 ReleaseBuffer(newbuf);
2964 ReleaseBuffer(buffer);
2965 if (BufferIsValid(vmbuffer_new))
2966 ReleaseBuffer(vmbuffer_new);
2967 if (BufferIsValid(vmbuffer))
2968 ReleaseBuffer(vmbuffer);
2971 * If new tuple is cachable, mark it for invalidation from the caches in
2972 * case we abort. Note it is OK to do this after releasing the buffer,
2973 * because the heaptup data structure is all in local memory, not in the
2974 * buffer.
2976 CacheInvalidateHeapTuple(relation, heaptup);
2979 * Release the lmgr tuple lock, if we had it.
2981 if (have_tuple_lock)
2982 UnlockTuple(relation, &(oldtup.t_self), ExclusiveLock);
2984 pgstat_count_heap_update(relation, use_hot_update);
2987 * If heaptup is a private copy, release it. Don't forget to copy t_self
2988 * back to the caller's image, too.
2990 if (heaptup != newtup)
2991 {
2992 newtup->t_self = heaptup->t_self;
2993 heap_freetuple(heaptup);
2994 }
2996 bms_free(hot_attrs);
2998 return HeapTupleMayBeUpdated;
2999 }
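/*
 * Usage sketch (added, hypothetical): how a caller might act on the failure
 * outputs of heap_update, in the spirit of EvalPlanQual-style chain
 * following.  Everything prefixed "example_" is invented; heap_fetch,
 * SnapshotAny and the ctid/update_xmax contract are per the comments above.
 */
#ifdef HEAPAM_USAGE_EXAMPLES
static void
example_follow_update_chain(Relation rel, ItemPointer otid, HeapTuple newtup)
{
	HTSU_Result result;
	ItemPointerData update_ctid;
	TransactionId update_xmax;

	result = heap_update(rel, otid, newtup,
						 &update_ctid, &update_xmax,
						 GetCurrentCommandId(true), InvalidSnapshot,
						 true /* wait */ );

	if (result == HeapTupleUpdated &&
		!ItemPointerEquals(otid, &update_ctid))
	{
		/*
		 * update_ctid points at the replacement version.  Fetch it and check
		 * that its xmin equals the xmax we were handed back, proving it
		 * really is the successor and not an unrelated recycled slot.
		 */
		HeapTupleData tuple;
		Buffer		buffer;

		tuple.t_self = update_ctid;
		if (heap_fetch(rel, SnapshotAny, &tuple, &buffer, false, NULL))
		{
			if (TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
									update_xmax))
			{
				/* ... re-evaluate quals against the new version ... */
			}
			ReleaseBuffer(buffer);
		}
	}
}
#endif   /* HEAPAM_USAGE_EXAMPLES */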
3002 * Check if the specified attribute's value is same in both given tuples.
3003 * Subroutine for HeapSatisfiesHOTUpdate.
3005 static bool
3006 heap_tuple_attr_equals(TupleDesc tupdesc, int attrnum,
3007 HeapTuple tup1, HeapTuple tup2)
3008 {
3009 Datum value1,
3010 value2;
3011 bool isnull1,
3012 isnull2;
3013 Form_pg_attribute att;
3016 * If it's a whole-tuple reference, say "not equal". It's not really
3017 * worth supporting this case, since it could only succeed after a no-op
3018 * update, which is hardly a case worth optimizing for.
3020 if (attrnum == 0)
3021 return false;
3024 * Likewise, automatically say "not equal" for any system attribute other
3025 * than OID and tableOID; we cannot expect these to be consistent in a HOT
3026 * chain, or even to be set correctly yet in the new tuple.
3028 if (attrnum < 0)
3029 {
3030 if (attrnum != ObjectIdAttributeNumber &&
3031 attrnum != TableOidAttributeNumber)
3032 return false;
3033 }
3036 * Extract the corresponding values. XXX this is pretty inefficient if
3037 * there are many indexed columns. Should HeapSatisfiesHOTUpdate do a
3038 * single heap_deform_tuple call on each tuple, instead? But that doesn't
3039 * work for system columns ...
3041 value1 = heap_getattr(tup1, attrnum, tupdesc, &isnull1);
3042 value2 = heap_getattr(tup2, attrnum, tupdesc, &isnull2);
3045 * If one value is NULL and other is not, then they are certainly not
3048 if (isnull1 != isnull2)
3049 return false;
3052 * If both are NULL, they can be considered equal.
3054 if (isnull1)
3055 return true;
3058 * We do simple binary comparison of the two datums. This may be overly
3059 * strict because there can be multiple binary representations for the
3060 * same logical value. But we should be OK as long as there are no false
3061 * positives. Using a type-specific equality operator is messy because
3062 * there could be multiple notions of equality in different operator
3063 * classes; furthermore, we cannot safely invoke user-defined functions
3064 * while holding exclusive buffer lock.
3066 if (attrnum <= 0)
3067 {
3068 /* The only allowed system columns are OIDs, so do this */
3069 return (DatumGetObjectId(value1) == DatumGetObjectId(value2));
3070 }
3071 else
3072 {
3073 Assert(attrnum <= tupdesc->natts);
3074 att = tupdesc->attrs[attrnum - 1];
3075 return datumIsEqual(value1, value2, att->attbyval, att->attlen);
3076 }
3077 }
3080 * Check if the old and new tuples represent a HOT-safe update. To be able
3081 * to do a HOT update, we must not have changed any columns used in index
3082 * definitions.
3084 * The set of attributes to be checked is passed in (we dare not try to
3085 * compute it while holding exclusive buffer lock...) NOTE that hot_attrs
3086 * is destructively modified! That is OK since this is invoked at most once
3087 * by heap_update.
3089 * Returns true if safe to do HOT update.
3091 static bool
3092 HeapSatisfiesHOTUpdate(Relation relation, Bitmapset *hot_attrs,
3093 HeapTuple oldtup, HeapTuple newtup)
3094 {
3095 int attrnum;
3097 while ((attrnum = bms_first_member(hot_attrs)) >= 0)
3098 {
3099 /* Adjust for system attributes */
3100 attrnum += FirstLowInvalidHeapAttributeNumber;
3102 /* If the attribute value has changed, we can't do HOT update */
3103 if (!heap_tuple_attr_equals(RelationGetDescr(relation), attrnum,
3104 oldtup, newtup))
3105 return false;
3106 }
3108 return true;
3109 }
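/*
 * Usage sketch (added, hypothetical): HeapSatisfiesHOTUpdate consumes its
 * bitmapset via bms_first_member, so each caller must pass a fresh copy.
 * heap_update does this with its hot_attrs variable; the wrapper below just
 * isolates the pattern.  The wrapper name and the HEAPAM_USAGE_EXAMPLES
 * guard are invented.
 */
#ifdef HEAPAM_USAGE_EXAMPLES
static bool
example_hot_update_is_safe(Relation rel, HeapTuple oldtup, HeapTuple newtup)
{
	/* fresh copy each call, since HeapSatisfiesHOTUpdate destroys it */
	Bitmapset  *attrs = RelationGetIndexAttrBitmap(rel);
	bool		safe = HeapSatisfiesHOTUpdate(rel, attrs, oldtup, newtup);

	bms_free(attrs);
	return safe;
}
#endif   /* HEAPAM_USAGE_EXAMPLES */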
3112 * simple_heap_update - replace a tuple
3114 * This routine may be used to update a tuple when concurrent updates of
3115 * the target tuple are not expected (for example, because we have a lock
3116 * on the relation associated with the tuple). Any failure is reported
3117 * via ereport().
3119 void
3120 simple_heap_update(Relation relation, ItemPointer otid, HeapTuple tup)
3121 {
3122 HTSU_Result result;
3123 ItemPointerData update_ctid;
3124 TransactionId update_xmax;
3126 result = heap_update(relation, otid, tup,
3127 &update_ctid, &update_xmax,
3128 GetCurrentCommandId(true), InvalidSnapshot,
3129 true /* wait for commit */ );
3130 switch (result)
3131 {
3132 case HeapTupleSelfUpdated:
3133 /* Tuple was already updated in current command? */
3134 elog(ERROR, "tuple already updated by self");
3135 break;
3137 case HeapTupleMayBeUpdated:
3138 /* done successfully */
3139 break;
3141 case HeapTupleUpdated:
3142 elog(ERROR, "tuple concurrently updated");
3143 break;
3145 default:
3146 elog(ERROR, "unrecognized heap_update status: %u", result);
3147 break;
3148 }
3149 }
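/*
 * Usage sketch (added, hypothetical): the common catalog-update pattern
 * built on simple_heap_update.  Assumes catalog/pg_class.h and
 * catalog/indexing.h, which heapam.c does not itself include; the function
 * name and the HEAPAM_USAGE_EXAMPLES guard are invented.
 */
#ifdef HEAPAM_USAGE_EXAMPLES
static void
example_set_relpages(Oid relid, int32 num_pages)
{
	Relation	pg_class_rel = heap_open(RelationRelationId, RowExclusiveLock);
	HeapTuple	tup = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(relid));

	if (!HeapTupleIsValid(tup))
		elog(ERROR, "cache lookup failed for relation %u", relid);

	((Form_pg_class) GETSTRUCT(tup))->relpages = num_pages;

	simple_heap_update(pg_class_rel, &tup->t_self, tup);
	/* keep the catalog's indexes consistent with the new row version */
	CatalogUpdateIndexes(pg_class_rel, tup);

	heap_freetuple(tup);
	heap_close(pg_class_rel, RowExclusiveLock);
}
#endif   /* HEAPAM_USAGE_EXAMPLES */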
3152 * heap_lock_tuple - lock a tuple in shared or exclusive mode
3154 * Note that this acquires a buffer pin, which the caller must release.
3156 * Input parameters:
3157 * relation: relation containing tuple (caller must hold suitable lock)
3158 * tuple->t_self: TID of tuple to lock (rest of struct need not be valid)
3159 * cid: current command ID (used for visibility test, and stored into
3160 * tuple's cmax if lock is successful)
3161 * mode: indicates if shared or exclusive tuple lock is desired
3162 * nowait: if true, ereport rather than blocking if lock not available
3164 * Output parameters:
3165 * *tuple: all fields filled in
3166 * *buffer: set to buffer holding tuple (pinned but not locked at exit)
3167 * *ctid: set to tuple's t_ctid, but only in failure cases
3168 * *update_xmax: set to tuple's xmax, but only in failure cases
3170 * Function result may be:
3171 * HeapTupleMayBeUpdated: lock was successfully acquired
3172 * HeapTupleSelfUpdated: lock failed because tuple updated by self
3173 * HeapTupleUpdated: lock failed because tuple updated by other xact
3175 * In the failure cases, the routine returns the tuple's t_ctid and t_xmax.
3176 * If t_ctid is the same as t_self, the tuple was deleted; if different, the
3177 * tuple was updated, and t_ctid is the location of the replacement tuple.
3178 * (t_xmax is needed to verify that the replacement tuple matches.)
3181 * NOTES: because the shared-memory lock table is of finite size, but users
3182 * could reasonably want to lock large numbers of tuples, we do not rely on
3183 * the standard lock manager to store tuple-level locks over the long term.
3184 * Instead, a tuple is marked as locked by setting the current transaction's
3185 * XID as its XMAX, and setting additional infomask bits to distinguish this
3186 * usage from the more normal case of having deleted the tuple. When
3187 * multiple transactions concurrently share-lock a tuple, the first locker's
3188 * XID is replaced in XMAX with a MultiTransactionId representing the set of
3189 * XIDs currently holding share-locks.
3191 * When it is necessary to wait for a tuple-level lock to be released, the
3192 * basic delay is provided by XactLockTableWait or MultiXactIdWait on the
3193 * contents of the tuple's XMAX. However, that mechanism will release all
3194 * waiters concurrently, so there would be a race condition as to which
3195 * waiter gets the tuple, potentially leading to indefinite starvation of
3196 * some waiters. The possibility of share-locking makes the problem much
3197 * worse --- a steady stream of share-lockers can easily block an exclusive
3198 * locker forever. To provide more reliable semantics about who gets a
3199 * tuple-level lock first, we use the standard lock manager. The protocol
3200 * for waiting for a tuple-level lock is really
3201 * LockTuple()
3202 * XactLockTableWait()
3203 * mark tuple as locked by me
3204 * UnlockTuple()
3205 * When there are multiple waiters, arbitration of who is to get the lock next
3206 * is provided by LockTuple(). However, at most one tuple-level lock will
3207 * be held or awaited per backend at any time, so we don't risk overflow
3208 * of the lock table. Note that incoming share-lockers are required to
3209 * do LockTuple as well, if there is any conflict, to ensure that they don't
3210 * starve out waiting exclusive-lockers. However, if there is not any active
3211 * conflict for a tuple, we don't incur any extra overhead.
3213 HTSU_Result
3214 heap_lock_tuple(Relation relation, HeapTuple tuple, Buffer *buffer,
3215 ItemPointer ctid, TransactionId *update_xmax,
3216 CommandId cid, LockTupleMode mode, bool nowait)
3217 {
3218 HTSU_Result result;
3219 ItemPointer tid = &(tuple->t_self);
3220 ItemId lp;
3221 Page page;
3222 TransactionId xid;
3223 TransactionId xmax;
3224 uint16 old_infomask;
3225 uint16 new_infomask;
3226 LOCKMODE tuple_lock_type;
3227 bool have_tuple_lock = false;
3229 tuple_lock_type = (mode == LockTupleShared) ? ShareLock : ExclusiveLock;
3231 *buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
3232 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
3234 page = BufferGetPage(*buffer);
3235 lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
3236 Assert(ItemIdIsNormal(lp));
3238 tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
3239 tuple->t_len = ItemIdGetLength(lp);
3240 tuple->t_tableOid = RelationGetRelid(relation);
3242 l3:
3243 result = HeapTupleSatisfiesUpdate(tuple->t_data, cid, *buffer);
3245 if (result == HeapTupleInvisible)
3246 {
3247 UnlockReleaseBuffer(*buffer);
3248 elog(ERROR, "attempted to lock invisible tuple");
3249 }
3250 else if (result == HeapTupleBeingUpdated)
3251 {
3252 TransactionId xwait;
3253 uint16 infomask;
3255 /* must copy state data before unlocking buffer */
3256 xwait = HeapTupleHeaderGetXmax(tuple->t_data);
3257 infomask = tuple->t_data->t_infomask;
3259 LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
3262 * If we wish to acquire share lock, and the tuple is already
3263 * share-locked by a multixact that includes any subtransaction of the
3264 * current top transaction, then we effectively hold the desired lock
3265 * already. We *must* succeed without trying to take the tuple lock,
3266 * else we will deadlock against anyone waiting to acquire exclusive
3267 * lock. We don't need to make any state changes in this case.
3269 if (mode == LockTupleShared &&
3270 (infomask & HEAP_XMAX_IS_MULTI) &&
3271 MultiXactIdIsCurrent((MultiXactId) xwait))
3273 Assert(infomask & HEAP_XMAX_SHARED_LOCK);
3274 /* Probably can't hold tuple lock here, but may as well check */
3275 if (have_tuple_lock)
3276 UnlockTuple(relation, tid, tuple_lock_type);
3277 return HeapTupleMayBeUpdated;
3281 * Acquire tuple lock to establish our priority for the tuple.
3282 * LockTuple will release us when we are next-in-line for the tuple.
3283 * We must do this even if we are share-locking.
3285 * If we are forced to "start over" below, we keep the tuple lock;
3286 * this arranges that we stay at the head of the line while rechecking
3287 * tuple state.
3289 if (!have_tuple_lock)
3290 {
3291 if (nowait)
3292 {
3293 if (!ConditionalLockTuple(relation, tid, tuple_lock_type))
3294 ereport(ERROR,
3295 (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
3296 errmsg("could not obtain lock on row in relation \"%s\"",
3297 RelationGetRelationName(relation))));
3298 }
3299 else
3300 LockTuple(relation, tid, tuple_lock_type);
3301 have_tuple_lock = true;
3302 }
3304 if (mode == LockTupleShared && (infomask & HEAP_XMAX_SHARED_LOCK))
3305 {
3307 * Acquiring sharelock when there's at least one sharelocker
3308 * already. We need not wait for him/them to complete.
3310 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
3313 * Make sure it's still a shared lock, else start over. (It's OK
3314 * if the ownership of the shared lock has changed, though.)
3316 if (!(tuple->t_data->t_infomask & HEAP_XMAX_SHARED_LOCK))
3317 goto l3;
3318 }
3319 else if (infomask & HEAP_XMAX_IS_MULTI)
3320 {
3321 /* wait for multixact to end */
3322 if (nowait)
3323 {
3324 if (!ConditionalMultiXactIdWait((MultiXactId) xwait))
3325 ereport(ERROR,
3326 (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
3327 errmsg("could not obtain lock on row in relation \"%s\"",
3328 RelationGetRelationName(relation))));
3329 }
3330 else
3331 MultiXactIdWait((MultiXactId) xwait);
3333 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
3336 * If xwait had just locked the tuple then some other xact could
3337 * update this tuple before we get to this point. Check for xmax
3338 * change, and start over if so.
3340 if (!(tuple->t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
3341 !TransactionIdEquals(HeapTupleHeaderGetXmax(tuple->t_data),
3342 xwait))
3343 goto l3;
3346 * You might think the multixact is necessarily done here, but not
3347 * so: it could have surviving members, namely our own xact or
3348 * other subxacts of this backend. It is legal for us to lock the
3349 * tuple in either case, however. We don't bother changing the
3350 * on-disk hint bits since we are about to overwrite the xmax
3351 * altogether.
3354 else
3356 /* wait for regular transaction to end */
3357 if (nowait)
3358 {
3359 if (!ConditionalXactLockTableWait(xwait))
3360 ereport(ERROR,
3361 (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
3362 errmsg("could not obtain lock on row in relation \"%s\"",
3363 RelationGetRelationName(relation))));
3364 }
3365 else
3366 XactLockTableWait(xwait);
3368 LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
3371 * xwait is done, but if xwait had just locked the tuple then some
3372 * other xact could update this tuple before we get to this point.
3373 * Check for xmax change, and start over if so.
3375 if ((tuple->t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
3376 !TransactionIdEquals(HeapTupleHeaderGetXmax(tuple->t_data),
3377 xwait))
3378 goto l3;
3380 /* Otherwise check if it committed or aborted */
3381 UpdateXmaxHintBits(tuple->t_data, *buffer, xwait);
3385 * We may lock if previous xmax aborted, or if it committed but only
3386 * locked the tuple without updating it. The case where we didn't
3387 * wait because we are joining an existing shared lock is correctly
3388 * handled, too.
3390 if (tuple->t_data->t_infomask & (HEAP_XMAX_INVALID |
3391 HEAP_IS_LOCKED))
3392 result = HeapTupleMayBeUpdated;
3393 else
3394 result = HeapTupleUpdated;
3395 }
3397 if (result != HeapTupleMayBeUpdated)
3398 {
3399 Assert(result == HeapTupleSelfUpdated || result == HeapTupleUpdated);
3400 Assert(!(tuple->t_data->t_infomask & HEAP_XMAX_INVALID));
3401 *ctid = tuple->t_data->t_ctid;
3402 *update_xmax = HeapTupleHeaderGetXmax(tuple->t_data);
3403 LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
3404 if (have_tuple_lock)
3405 UnlockTuple(relation, tid, tuple_lock_type);
3406 return result;
3407 }
3410 * We might already hold the desired lock (or stronger), possibly under a
3411 * different subtransaction of the current top transaction. If so, there
3412 * is no need to change state or issue a WAL record. We already handled
3413 * the case where this is true for xmax being a MultiXactId, so now check
3414 * for cases where it is a plain TransactionId.
3416 * Note in particular that this covers the case where we already hold
3417 * exclusive lock on the tuple and the caller only wants shared lock. It
3418 * would certainly not do to give up the exclusive lock.
3420 xmax = HeapTupleHeaderGetXmax(tuple->t_data);
3421 old_infomask = tuple->t_data->t_infomask;
3423 if (!(old_infomask & (HEAP_XMAX_INVALID |
3424 HEAP_XMAX_COMMITTED |
3425 HEAP_XMAX_IS_MULTI)) &&
3426 (mode == LockTupleShared ?
3427 (old_infomask & HEAP_IS_LOCKED) :
3428 (old_infomask & HEAP_XMAX_EXCL_LOCK)) &&
3429 TransactionIdIsCurrentTransactionId(xmax))
3431 LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
3432 /* Probably can't hold tuple lock here, but may as well check */
3433 if (have_tuple_lock)
3434 UnlockTuple(relation, tid, tuple_lock_type);
3435 return HeapTupleMayBeUpdated;
3439 * Compute the new xmax and infomask to store into the tuple. Note we do
3440 * not modify the tuple just yet, because that would leave it in the wrong
3441 * state if multixact.c elogs.
3443 xid = GetCurrentTransactionId();
3445 new_infomask = old_infomask & ~(HEAP_XMAX_COMMITTED |
3446 HEAP_XMAX_INVALID |
3447 HEAP_XMAX_IS_MULTI |
3448 HEAP_IS_LOCKED |
3449 HEAP_MOVED);
3451 if (mode == LockTupleShared)
3454 * If this is the first acquisition of a shared lock in the current
3455 * transaction, set my per-backend OldestMemberMXactId setting. We can
3456 * be certain that the transaction will never become a member of any
3457 * older MultiXactIds than that. (We have to do this even if we end
3458 * up just using our own TransactionId below, since some other backend
3459 * could incorporate our XID into a MultiXact immediately afterwards.)
3461 MultiXactIdSetOldestMember();
3463 new_infomask |= HEAP_XMAX_SHARED_LOCK;
3466 * Check to see if we need a MultiXactId because there are multiple
3467 * lockers.
3469 * HeapTupleSatisfiesUpdate will have set the HEAP_XMAX_INVALID bit if
3470 * the xmax was a MultiXactId but it was not running anymore. There is
3471 * a race condition, which is that the MultiXactId may have finished
3472 * since then, but that uncommon case is handled within
3473 * MultiXactIdExpand.
3475 * There is a similar race condition possible when the old xmax was a
3476 * regular TransactionId. We test TransactionIdIsInProgress again
3477 * just to narrow the window, but it's still possible to end up
3478 * creating an unnecessary MultiXactId. Fortunately this is harmless.
3480 if (!(old_infomask & (HEAP_XMAX_INVALID | HEAP_XMAX_COMMITTED)))
3482 if (old_infomask & HEAP_XMAX_IS_MULTI)
3485 * If the XMAX is already a MultiXactId, then we need to
3486 * expand it to include our own TransactionId.
3488 xid = MultiXactIdExpand((MultiXactId) xmax, xid);
3489 new_infomask |= HEAP_XMAX_IS_MULTI;
3491 else if (TransactionIdIsInProgress(xmax))
3494 * If the XMAX is a valid TransactionId, then we need to
3495 * create a new MultiXactId that includes both the old locker
3496 * and our own TransactionId.
3498 xid = MultiXactIdCreate(xmax, xid);
3499 new_infomask |= HEAP_XMAX_IS_MULTI;
3504 * Can get here iff HeapTupleSatisfiesUpdate saw the old xmax
3505 * as running, but it finished before
3506 * TransactionIdIsInProgress() got to run. Treat it like
3507 * there's no locker in the tuple.
3514 * There was no previous locker, so just insert our own
3515 * TransactionId.
3519 else
3520 {
3521 /* We want an exclusive lock on the tuple */
3522 new_infomask |= HEAP_XMAX_EXCL_LOCK;
3523 }
3525 START_CRIT_SECTION();
3528 * Store transaction information of xact locking the tuple.
3530 * Note: Cmax is meaningless in this context, so don't set it; this avoids
3531 * possibly generating a useless combo CID.
3533 tuple->t_data->t_infomask = new_infomask;
3534 HeapTupleHeaderClearHotUpdated(tuple->t_data);
3535 HeapTupleHeaderSetXmax(tuple->t_data, xid);
3536 /* Make sure there is no forward chain link in t_ctid */
3537 tuple->t_data->t_ctid = *tid;
3539 MarkBufferDirty(*buffer);
3542 * XLOG stuff. You might think that we don't need an XLOG record because
3543 * there is no state change worth restoring after a crash. You would be
3544 * wrong however: we have just written either a TransactionId or a
3545 * MultiXactId that may never have been seen on disk before, and we need
3546 * to make sure that there are XLOG entries covering those ID numbers.
3547 * Else the same IDs might be re-used after a crash, which would be
3548 * disastrous if this page made it to disk before the crash. Essentially
3549 * we have to enforce the WAL log-before-data rule even in this case.
3550 * (Also, in a PITR log-shipping or 2PC environment, we have to have XLOG
3551 * entries for everything anyway.)
3553 if (RelationNeedsWAL(relation))
3554 {
3555 xl_heap_lock xlrec;
3556 XLogRecPtr recptr;
3557 XLogRecData rdata[2];
3559 xlrec.target.node = relation->rd_node;
3560 xlrec.target.tid = tuple->t_self;
3561 xlrec.locking_xid = xid;
3562 xlrec.xid_is_mxact = ((new_infomask & HEAP_XMAX_IS_MULTI) != 0);
3563 xlrec.shared_lock = (mode == LockTupleShared);
3564 rdata[0].data = (char *) &xlrec;
3565 rdata[0].len = SizeOfHeapLock;
3566 rdata[0].buffer = InvalidBuffer;
3567 rdata[0].next = &(rdata[1]);
3569 rdata[1].data = NULL;
3570 rdata[1].len = 0;
3571 rdata[1].buffer = *buffer;
3572 rdata[1].buffer_std = true;
3573 rdata[1].next = NULL;
3575 recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_LOCK, rdata);
3577 PageSetLSN(page, recptr);
3578 PageSetTLI(page, ThisTimeLineID);
3579 }
3581 END_CRIT_SECTION();
3583 LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
3586 * Don't update the visibility map here. Locking a tuple doesn't change
3587 * visibility info.
3591 * Now that we have successfully marked the tuple as locked, we can
3592 * release the lmgr tuple lock, if we had it.
3594 if (have_tuple_lock)
3595 UnlockTuple(relation, tid, tuple_lock_type);
3597 return HeapTupleMayBeUpdated;
3598 }
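/*
 * Usage sketch (added, hypothetical): a SELECT FOR UPDATE-style caller of
 * heap_lock_tuple.  Only t_self need be valid on entry, and the pin
 * acquired by heap_lock_tuple must be released by the caller, per the
 * comments above.  The wrapper name and HEAPAM_USAGE_EXAMPLES guard are
 * invented.
 */
#ifdef HEAPAM_USAGE_EXAMPLES
static bool
example_lock_row_exclusively(Relation rel, ItemPointer tid)
{
	HeapTupleData tuple;
	Buffer		buffer;
	ItemPointerData ctid;
	TransactionId update_xmax;
	HTSU_Result result;

	tuple.t_self = *tid;
	result = heap_lock_tuple(rel, &tuple, &buffer, &ctid, &update_xmax,
							 GetCurrentCommandId(true),
							 LockTupleExclusive, false /* block */ );

	/* caller must release the pin heap_lock_tuple took */
	ReleaseBuffer(buffer);

	/*
	 * On HeapTupleSelfUpdated/HeapTupleUpdated, ctid and update_xmax
	 * describe the successor version, exactly as for heap_update.
	 */
	return (result == HeapTupleMayBeUpdated);
}
#endif   /* HEAPAM_USAGE_EXAMPLES */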
3602 * heap_inplace_update - update a tuple "in place" (ie, overwrite it)
3604 * Overwriting violates both MVCC and transactional safety, so the uses
3605 * of this function in Postgres are extremely limited. Nonetheless we
3606 * find some places to use it.
3608 * The tuple cannot change size, and therefore it's reasonable to assume
3609 * that its null bitmap (if any) doesn't change either. So we just
3610 * overwrite the data portion of the tuple without touching the null
3611 * bitmap or any of the header fields.
3613 * tuple is an in-memory tuple structure containing the data to be written
3614 * over the target tuple. Also, tuple->t_self identifies the target tuple.
3616 void
3617 heap_inplace_update(Relation relation, HeapTuple tuple)
3618 {
3619 Buffer buffer;
3620 Page page;
3621 OffsetNumber offnum;
3622 ItemId lp = NULL;
3623 HeapTupleHeader htup;
3624 uint32 oldlen;
3625 uint32 newlen;
3627 buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&(tuple->t_self)));
3628 LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3629 page = (Page) BufferGetPage(buffer);
3631 offnum = ItemPointerGetOffsetNumber(&(tuple->t_self));
3632 if (PageGetMaxOffsetNumber(page) >= offnum)
3633 lp = PageGetItemId(page, offnum);
3635 if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
3636 elog(ERROR, "heap_inplace_update: invalid lp");
3638 htup = (HeapTupleHeader) PageGetItem(page, lp);
3640 oldlen = ItemIdGetLength(lp) - htup->t_hoff;
3641 newlen = tuple->t_len - tuple->t_data->t_hoff;
3642 if (oldlen != newlen || htup->t_hoff != tuple->t_data->t_hoff)
3643 elog(ERROR, "heap_inplace_update: wrong tuple length");
3645 /* NO EREPORT(ERROR) from here till changes are logged */
3646 START_CRIT_SECTION();
3648 memcpy((char *) htup + htup->t_hoff,
3649 (char *) tuple->t_data + tuple->t_data->t_hoff,
3650 newlen);
3652 MarkBufferDirty(buffer);
3655 if (RelationNeedsWAL(relation))
3656 {
3657 xl_heap_inplace xlrec;
3658 XLogRecPtr recptr;
3659 XLogRecData rdata[2];
3661 xlrec.target.node = relation->rd_node;
3662 xlrec.target.tid = tuple->t_self;
3664 rdata[0].data = (char *) &xlrec;
3665 rdata[0].len = SizeOfHeapInplace;
3666 rdata[0].buffer = InvalidBuffer;
3667 rdata[0].next = &(rdata[1]);
3669 rdata[1].data = (char *) htup + htup->t_hoff;
3670 rdata[1].len = newlen;
3671 rdata[1].buffer = buffer;
3672 rdata[1].buffer_std = true;
3673 rdata[1].next = NULL;
3675 recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_INPLACE, rdata);
3677 PageSetLSN(page, recptr);
3678 PageSetTLI(page, ThisTimeLineID);
3679 }
3681 END_CRIT_SECTION();
3683 UnlockReleaseBuffer(buffer);
3685 /* Send out shared cache inval if necessary */
3686 if (!IsBootstrapProcessingMode())
3687 CacheInvalidateHeapTuple(relation, tuple);
3688 }
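/*
 * Usage sketch (added commentary): overwriting fixed-width pg_class stats
 * columns is the classic heap_inplace_update caller, roughly the pattern
 * VACUUM's vac_update_relstats follows.  Assumes catalog/pg_class.h; the
 * function name and the HEAPAM_USAGE_EXAMPLES guard are invented.
 */
#ifdef HEAPAM_USAGE_EXAMPLES
static void
example_overwrite_relstats(Relation pg_class_rel, Oid relid,
						   int32 num_pages, float4 num_tuples)
{
	HeapTuple	ctup = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(relid));
	Form_pg_class pgcform;

	if (!HeapTupleIsValid(ctup))
		elog(ERROR, "pg_class entry for relid %u vanished", relid);

	pgcform = (Form_pg_class) GETSTRUCT(ctup);
	pgcform->relpages = num_pages;
	pgcform->reltuples = num_tuples;

	/*
	 * Only fixed-width fields change, so the tuple cannot change size:
	 * exactly the precondition heap_inplace_update demands.
	 */
	heap_inplace_update(pg_class_rel, ctup);

	heap_freetuple(ctup);
}
#endif   /* HEAPAM_USAGE_EXAMPLES */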
3694 * Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
3695 * are older than the specified cutoff XID. If so, replace them with
3696 * FrozenTransactionId or InvalidTransactionId as appropriate, and return
3697 * TRUE. Return FALSE if nothing was changed.
3699 * It is assumed that the caller has checked the tuple with
3700 * HeapTupleSatisfiesVacuum() and determined that it is not HEAPTUPLE_DEAD
3701 * (else we should be removing the tuple, not freezing it).
3703 * NB: cutoff_xid *must* be <= the current global xmin, to ensure that any
3704 * XID older than it could neither be running nor seen as running by any
3705 * open transaction. This ensures that the replacement will not change
3706 * anyone's idea of the tuple state. Also, since we assume the tuple is
3707 * not HEAPTUPLE_DEAD, the fact that an XID is not still running allows us
3708 * to assume that it is either committed good or aborted, as appropriate;
3709 * so we need no external state checks to decide what to do. (This is good
3710 * because this function is applied during WAL recovery, when we don't have
3711 * access to any such state, and can't depend on the hint bits to be set.)
3713 * In lazy VACUUM, we call this while initially holding only a shared lock
3714 * on the tuple's buffer. If any change is needed, we trade that in for an
3715 * exclusive lock before making the change. Caller should pass the buffer ID
3716 * if shared lock is held, InvalidBuffer if exclusive lock is already held.
3718 * Note: it might seem we could make the changes without exclusive lock, since
3719 * TransactionId read/write is assumed atomic anyway. However there is a race
3720 * condition: someone who just fetched an old XID that we overwrite here could
3721 * conceivably not finish checking the XID against pg_clog before we finish
3722 * the VACUUM and perhaps truncate off the part of pg_clog he needs. Getting
3723 * exclusive lock ensures no other backend is in process of checking the
3724 * tuple status. Also, getting exclusive lock makes it safe to adjust the
3725 * infomask bits.
3727 bool
3728 heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
3729 Buffer buf)
3730 {
3731 bool changed = false;
3732 TransactionId xid;
3734 xid = HeapTupleHeaderGetXmin(tuple);
3735 if (TransactionIdIsNormal(xid) &&
3736 TransactionIdPrecedes(xid, cutoff_xid))
3738 if (buf != InvalidBuffer)
3740 /* trade in share lock for exclusive lock */
3741 LockBuffer(buf, BUFFER_LOCK_UNLOCK);
3742 LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
3743 buf = InvalidBuffer;
3745 HeapTupleHeaderSetXmin(tuple, FrozenTransactionId);
3748 * Might as well fix the hint bits too; usually XMIN_COMMITTED will
3749 * already be set here, but there's a small chance not.
3751 Assert(!(tuple->t_infomask & HEAP_XMIN_INVALID));
3752 tuple->t_infomask |= HEAP_XMIN_COMMITTED;
3753 changed = true;
3754 }
3757 * When we release shared lock, it's possible for someone else to change
3758 * xmax before we get the lock back, so repeat the check after acquiring
3759 * exclusive lock. (We don't need this pushup for xmin, because only
3760 * VACUUM could be interested in changing an existing tuple's xmin, and
3761 * there's only one VACUUM allowed on a table at a time.)
3763 recheck_xmax:
3764 if (!(tuple->t_infomask & HEAP_XMAX_IS_MULTI))
3766 xid = HeapTupleHeaderGetXmax(tuple);
3767 if (TransactionIdIsNormal(xid) &&
3768 TransactionIdPrecedes(xid, cutoff_xid))
3770 if (buf != InvalidBuffer)
3772 /* trade in share lock for exclusive lock */
3773 LockBuffer(buf, BUFFER_LOCK_UNLOCK);
3774 LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
3775 buf = InvalidBuffer;
3776 goto recheck_xmax; /* see comment above */
3778 HeapTupleHeaderSetXmax(tuple, InvalidTransactionId);
3781 * The tuple might be marked either XMAX_INVALID or XMAX_COMMITTED
3782 * + LOCKED. Normalize to INVALID just to be sure no one gets
3783 * confused.
3785 tuple->t_infomask &= ~HEAP_XMAX_COMMITTED;
3786 tuple->t_infomask |= HEAP_XMAX_INVALID;
3787 HeapTupleHeaderClearHotUpdated(tuple);
3788 changed = true;
3791 else
3794 * XXX perhaps someday we should zero out very old MultiXactIds here?
3796 * The only way a stale MultiXactId could pose a problem is if a
3797 * tuple, having once been multiply-share-locked, is not touched by
3798 * any vacuum or attempted lock or deletion for just over 4G MultiXact
3799 * creations, and then in the probably-narrow window where its xmax
3800 * is again a live MultiXactId, someone tries to lock or delete it.
3801 * Even then, another share-lock attempt would work fine. An
3802 * exclusive-lock or delete attempt would face unexpected delay, or
3803 * in the very worst case get a deadlock error. This seems an
3804 * extremely low-probability scenario with minimal downside even if
3805 * it does happen, so for now we don't do the extra bookkeeping that
3806 * would be needed to clean out MultiXactIds.
3812 * Although xvac per se could only be set by old-style VACUUM FULL, it
3813 * shares physical storage space with cmax, and so could be wiped out by
3814 * someone setting xmax. Hence recheck after changing lock, same as for
3815 * xmax itself.
3817 * Old-style VACUUM FULL is gone, but we have to keep this code as long as
3818 * we support having MOVED_OFF/MOVED_IN tuples in the database.
3820 recheck_xvac:
3821 if (tuple->t_infomask & HEAP_MOVED)
3823 xid = HeapTupleHeaderGetXvac(tuple);
3824 if (TransactionIdIsNormal(xid) &&
3825 TransactionIdPrecedes(xid, cutoff_xid))
3827 if (buf != InvalidBuffer)
3829 /* trade in share lock for exclusive lock */
3830 LockBuffer(buf, BUFFER_LOCK_UNLOCK);
3831 LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
3832 buf = InvalidBuffer;
3833 goto recheck_xvac; /* see comment above */
3837 * If a MOVED_OFF tuple is not dead, the xvac transaction must
3838 * have failed; whereas a non-dead MOVED_IN tuple must mean the
3839 * xvac transaction succeeded.
3841 if (tuple->t_infomask & HEAP_MOVED_OFF)
3842 HeapTupleHeaderSetXvac(tuple, InvalidTransactionId);
3843 else
3844 HeapTupleHeaderSetXvac(tuple, FrozenTransactionId);
3847 * Might as well fix the hint bits too; usually XMIN_COMMITTED
3848 * will already be set here, but there's a small chance not.
3850 Assert(!(tuple->t_infomask & HEAP_XMIN_INVALID));
3851 tuple->t_infomask |= HEAP_XMIN_COMMITTED;
3852 changed = true;
3853 }
3854 }
3856 return changed;
3857 }
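/*
 * Usage sketch (added, hypothetical): a VACUUM-style loop that freezes all
 * eligible tuples on an exclusively-locked page and WAL-logs the result
 * with log_heap_freeze (below).  Passing InvalidBuffer tells
 * heap_freeze_tuple the exclusive lock is already held, per its comments.
 * The function name and the HEAPAM_USAGE_EXAMPLES guard are invented.
 */
#ifdef HEAPAM_USAGE_EXAMPLES
static void
example_freeze_page(Relation rel, Buffer buf, TransactionId cutoff_xid)
{
	Page		page = BufferGetPage(buf);
	OffsetNumber offnum,
				maxoff = PageGetMaxOffsetNumber(page);
	OffsetNumber frozen[MaxHeapTuplesPerPage];
	int			nfrozen = 0;

	for (offnum = FirstOffsetNumber; offnum <= maxoff; offnum++)
	{
		ItemId		itemid = PageGetItemId(page, offnum);

		if (!ItemIdIsNormal(itemid))
			continue;
		if (heap_freeze_tuple((HeapTupleHeader) PageGetItem(page, itemid),
							  cutoff_xid, InvalidBuffer))
			frozen[nfrozen++] = offnum;
	}

	if (nfrozen > 0)
	{
		START_CRIT_SECTION();
		MarkBufferDirty(buf);
		if (RelationNeedsWAL(rel))
		{
			XLogRecPtr	recptr = log_heap_freeze(rel, buf, cutoff_xid,
												 frozen, nfrozen);

			PageSetLSN(page, recptr);
			PageSetTLI(page, ThisTimeLineID);
		}
		END_CRIT_SECTION();
	}
}
#endif   /* HEAPAM_USAGE_EXAMPLES */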
3861 * heap_markpos - mark scan position
3864 void
3865 heap_markpos(HeapScanDesc scan)
3866 {
3867 /* Note: no locking manipulations needed */
3869 if (scan->rs_ctup.t_data != NULL)
3870 {
3871 scan->rs_mctid = scan->rs_ctup.t_self;
3872 if (scan->rs_pageatatime)
3873 scan->rs_mindex = scan->rs_cindex;
3874 }
3875 else
3876 ItemPointerSetInvalid(&scan->rs_mctid);
3877 }
3880 * heap_restrpos - restore position to marked location
3883 void
3884 heap_restrpos(HeapScanDesc scan)
3885 {
3886 /* XXX no amrestrpos checking that ammarkpos called */
3888 if (!ItemPointerIsValid(&scan->rs_mctid))
3889 {
3890 scan->rs_ctup.t_data = NULL;
3893 * unpin scan buffers
3895 if (BufferIsValid(scan->rs_cbuf))
3896 ReleaseBuffer(scan->rs_cbuf);
3897 scan->rs_cbuf = InvalidBuffer;
3898 scan->rs_cblock = InvalidBlockNumber;
3899 scan->rs_inited = false;
3900 }
3901 else
3902 {
3904 * If we reached end of scan, rs_inited will now be false. We must
3905 * reset it to true to keep heapgettup from doing the wrong thing.
3907 scan->rs_inited = true;
3908 scan->rs_ctup.t_self = scan->rs_mctid;
3909 if (scan->rs_pageatatime)
3910 {
3911 scan->rs_cindex = scan->rs_mindex;
3912 heapgettup_pagemode(scan,
3913 NoMovementScanDirection,
3914 0, /* needn't recheck scan keys */
3915 NULL);
3916 }
3917 else
3918 heapgettup(scan,
3919 NoMovementScanDirection,
3920 0, /* needn't recheck scan keys */
3921 NULL);
3922 }
3923 }
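/*
 * Usage sketch (added, hypothetical): the mark/restore protocol as a merge
 * join rescan would use it.  After heap_restrpos, the next forward
 * heap_getnext resumes just past the marked tuple.  The function name and
 * the HEAPAM_USAGE_EXAMPLES guard are invented.
 */
#ifdef HEAPAM_USAGE_EXAMPLES
static void
example_mark_and_restore(Relation rel, Snapshot snapshot)
{
	HeapScanDesc scan = heap_beginscan(rel, snapshot, 0, NULL);

	/* consume one tuple, then remember the position */
	(void) heap_getnext(scan, ForwardScanDirection);
	heap_markpos(scan);

	/* read ahead arbitrarily far ... */
	while (heap_getnext(scan, ForwardScanDirection) != NULL)
		;

	/* ... then jump back and rescan from the mark */
	heap_restrpos(scan);
	(void) heap_getnext(scan, ForwardScanDirection);

	heap_endscan(scan);
}
#endif   /* HEAPAM_USAGE_EXAMPLES */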
3926 * If 'tuple' contains any visible XID greater than latestRemovedXid,
3927 * ratchet forwards latestRemovedXid to the greatest one found.
3928 * This is used as the basis for generating Hot Standby conflicts, so
3929 * if a tuple was never visible then removing it should not conflict
3930 * with queries.
3932 void
3933 HeapTupleHeaderAdvanceLatestRemovedXid(HeapTupleHeader tuple,
3934 TransactionId *latestRemovedXid)
3935 {
3936 TransactionId xmin = HeapTupleHeaderGetXmin(tuple);
3937 TransactionId xmax = HeapTupleHeaderGetXmax(tuple);
3938 TransactionId xvac = HeapTupleHeaderGetXvac(tuple);
3940 if (tuple->t_infomask & HEAP_MOVED)
3942 if (TransactionIdPrecedes(*latestRemovedXid, xvac))
3943 *latestRemovedXid = xvac;
3947 * Ignore tuples inserted by an aborted transaction or if the tuple was
3948 * updated/deleted by the inserting transaction.
3950 * Look for a committed hint bit, or if no xmin bit is set, check clog.
3951 * This needs to work on both master and standby, where it is used to
3952 * assess btree delete records.
3954 if ((tuple->t_infomask & HEAP_XMIN_COMMITTED) ||
3955 (!(tuple->t_infomask & HEAP_XMIN_COMMITTED) &&
3956 !(tuple->t_infomask & HEAP_XMIN_INVALID) &&
3957 TransactionIdDidCommit(xmin)))
3958 {
3959 if (xmax != xmin &&
3960 TransactionIdFollows(xmax, *latestRemovedXid))
3961 *latestRemovedXid = xmax;
3962 }
3964 /* *latestRemovedXid may still be invalid at end */
3965 }
3968 * Perform XLogInsert to register a heap cleanup info message. These
3969 * messages are sent once per VACUUM and are required because
3970 * of the phasing of removal operations during a lazy VACUUM.
3971 * see comments for vacuum_log_cleanup_info().
3973 XLogRecPtr
3974 log_heap_cleanup_info(RelFileNode rnode, TransactionId latestRemovedXid)
3975 {
3976 xl_heap_cleanup_info xlrec;
3977 XLogRecPtr recptr;
3978 XLogRecData rdata;
3980 xlrec.node = rnode;
3981 xlrec.latestRemovedXid = latestRemovedXid;
3983 rdata.data = (char *) &xlrec;
3984 rdata.len = SizeOfHeapCleanupInfo;
3985 rdata.buffer = InvalidBuffer;
3986 rdata.next = NULL;
3988 recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_CLEANUP_INFO, &rdata);
3990 return recptr;
3991 }
3994 * Perform XLogInsert for a heap-clean operation. Caller must already
3995 * have modified the buffer and marked it dirty.
3997 * Note: prior to Postgres 8.3, the entries in the nowunused[] array were
3998 * zero-based tuple indexes. Now they are one-based like other uses
3999 * of OffsetNumber.
4001 * We also include latestRemovedXid, which is the greatest XID present in
4002 * the removed tuples. That allows recovery processing to cancel or wait
4003 * for long standby queries that can still see these tuples.
4005 XLogRecPtr
4006 log_heap_clean(Relation reln, Buffer buffer,
4007 OffsetNumber *redirected, int nredirected,
4008 OffsetNumber *nowdead, int ndead,
4009 OffsetNumber *nowunused, int nunused,
4010 TransactionId latestRemovedXid)
4011 {
4012 xl_heap_clean xlrec;
4013 uint8 info;
4014 XLogRecPtr recptr;
4015 XLogRecData rdata[4];
4017 /* Caller should not call me on a non-WAL-logged relation */
4018 Assert(RelationNeedsWAL(reln));
4020 xlrec.node = reln->rd_node;
4021 xlrec.block = BufferGetBlockNumber(buffer);
4022 xlrec.latestRemovedXid = latestRemovedXid;
4023 xlrec.nredirected = nredirected;
4024 xlrec.ndead = ndead;
4026 rdata[0].data = (char *) &xlrec;
4027 rdata[0].len = SizeOfHeapClean;
4028 rdata[0].buffer = InvalidBuffer;
4029 rdata[0].next = &(rdata[1]);
4032 * The OffsetNumber arrays are not actually in the buffer, but we pretend
4033 * that they are. When XLogInsert stores the whole buffer, the offset
4034 * arrays need not be stored too. Note that even if all three arrays are
4035 * empty, we want to expose the buffer as a candidate for whole-page
4036 * storage, since this record type implies a defragmentation operation
4037 * even if no item pointers changed state.
4039 if (nredirected > 0)
4040 {
4041 rdata[1].data = (char *) redirected;
4042 rdata[1].len = nredirected * sizeof(OffsetNumber) * 2;
4043 }
4044 else
4045 {
4046 rdata[1].data = NULL;
4047 rdata[1].len = 0;
4048 }
4049 rdata[1].buffer = buffer;
4050 rdata[1].buffer_std = true;
4051 rdata[1].next = &(rdata[2]);
4053 if (ndead > 0)
4054 {
4055 rdata[2].data = (char *) nowdead;
4056 rdata[2].len = ndead * sizeof(OffsetNumber);
4057 }
4058 else
4059 {
4060 rdata[2].data = NULL;
4061 rdata[2].len = 0;
4062 }
4063 rdata[2].buffer = buffer;
4064 rdata[2].buffer_std = true;
4065 rdata[2].next = &(rdata[3]);
4067 if (nunused > 0)
4068 {
4069 rdata[3].data = (char *) nowunused;
4070 rdata[3].len = nunused * sizeof(OffsetNumber);
4071 }
4072 else
4073 {
4074 rdata[3].data = NULL;
4075 rdata[3].len = 0;
4076 }
4077 rdata[3].buffer = buffer;
4078 rdata[3].buffer_std = true;
4079 rdata[3].next = NULL;
4081 info = XLOG_HEAP2_CLEAN;
4082 recptr = XLogInsert(RM_HEAP2_ID, info, rdata);
4084 return recptr;
4085 }
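/*
 * Usage sketch (added, hypothetical): the caller-side protocol for
 * log_heap_clean, as page pruning would follow it.  latestRemovedXid is
 * accumulated beforehand by calling HeapTupleHeaderAdvanceLatestRemovedXid
 * on each tuple being removed.  The function name and HEAPAM_USAGE_EXAMPLES
 * guard are invented; the page is assumed already modified by the caller.
 */
#ifdef HEAPAM_USAGE_EXAMPLES
static void
example_log_page_clean(Relation rel, Buffer buffer,
					   OffsetNumber *redirected, int nredirected,
					   OffsetNumber *nowdead, int ndead,
					   OffsetNumber *nowunused, int nunused,
					   TransactionId latestRemovedXid)
{
	START_CRIT_SECTION();
	MarkBufferDirty(buffer);
	if (RelationNeedsWAL(rel))
	{
		XLogRecPtr	recptr = log_heap_clean(rel, buffer,
											redirected, nredirected,
											nowdead, ndead,
											nowunused, nunused,
											latestRemovedXid);

		PageSetLSN(BufferGetPage(buffer), recptr);
		PageSetTLI(BufferGetPage(buffer), ThisTimeLineID);
	}
	END_CRIT_SECTION();
}
#endif   /* HEAPAM_USAGE_EXAMPLES */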
/*
 * Perform XLogInsert for a heap-freeze operation.  Caller must already
 * have modified the buffer and marked it dirty.
 */
XLogRecPtr
log_heap_freeze(Relation reln, Buffer buffer,
				TransactionId cutoff_xid,
				OffsetNumber *offsets, int offcnt)
{
	xl_heap_freeze xlrec;
	XLogRecPtr	recptr;
	XLogRecData rdata[2];

	/* Caller should not call me on a non-WAL-logged relation */
	Assert(RelationNeedsWAL(reln));
	/* nor when there are no tuples to freeze */
	Assert(offcnt > 0);

	xlrec.node = reln->rd_node;
	xlrec.block = BufferGetBlockNumber(buffer);
	xlrec.cutoff_xid = cutoff_xid;

	rdata[0].data = (char *) &xlrec;
	rdata[0].len = SizeOfHeapFreeze;
	rdata[0].buffer = InvalidBuffer;
	rdata[0].next = &(rdata[1]);

	/*
	 * The tuple-offsets array is not actually in the buffer, but pretend that
	 * it is.  When XLogInsert stores the whole buffer, the offsets array need
	 * not be stored too.
	 */
	rdata[1].data = (char *) offsets;
	rdata[1].len = offcnt * sizeof(OffsetNumber);
	rdata[1].buffer = buffer;
	rdata[1].buffer_std = true;
	rdata[1].next = NULL;

	recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_FREEZE, rdata);

	return recptr;
}
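/*
 * Illustrative caller pattern (a sketch, not copied from vacuumlazy.c):
 * the buffer must be dirtied inside the same critical section, and the
 * page LSN/TLI set from the returned record pointer.  'frozen' and
 * 'nfrozen' are hypothetical names for the caller's offset array.
 *
 *	START_CRIT_SECTION();
 *	MarkBufferDirty(buf);
 *	if (RelationNeedsWAL(rel))
 *	{
 *		XLogRecPtr	recptr = log_heap_freeze(rel, buf, FreezeLimit,
 *											 frozen, nfrozen);
 *
 *		PageSetLSN(page, recptr);
 *		PageSetTLI(page, ThisTimeLineID);
 *	}
 *	END_CRIT_SECTION();
 */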
/*
 * Perform XLogInsert for a heap-visible operation.  'block' is the block
 * being marked all-visible, and vm_buffer is the buffer containing the
 * corresponding visibility map block.  Both should have already been
 * modified and dirtied.
 */
XLogRecPtr
log_heap_visible(RelFileNode rnode, BlockNumber block, Buffer vm_buffer)
{
	xl_heap_visible xlrec;
	XLogRecPtr	recptr;
	XLogRecData rdata[2];

	xlrec.node = rnode;
	xlrec.block = block;

	rdata[0].data = (char *) &xlrec;
	rdata[0].len = SizeOfHeapVisible;
	rdata[0].buffer = InvalidBuffer;
	rdata[0].next = &(rdata[1]);

	rdata[1].data = NULL;
	rdata[1].len = 0;
	rdata[1].buffer = vm_buffer;
	rdata[1].buffer_std = false;
	rdata[1].next = NULL;

	recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_VISIBLE, rdata);

	return recptr;
}
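/*
 * Note on the rdata[1] entry above: it carries no payload of its own
 * (data = NULL, len = 0) and exists only to reference vm_buffer, so that
 * XLogInsert can emit the visibility map page as a backup block when one
 * is needed.  buffer_std is false because visibility map pages do not
 * maintain the standard pd_lower/pd_upper free-space convention, so the
 * usual "hole" compression for standard pages cannot be applied to them.
 */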
/*
 * Perform XLogInsert for a heap-update operation.  Caller must already
 * have modified the buffer(s) and marked them dirty.
 */
static XLogRecPtr
log_heap_update(Relation reln, Buffer oldbuf, ItemPointerData from,
				Buffer newbuf, HeapTuple newtup,
				bool all_visible_cleared, bool new_all_visible_cleared)
{
	xl_heap_update xlrec;
	xl_heap_header xlhdr;
	uint8		info;
	XLogRecPtr	recptr;
	XLogRecData rdata[4];
	Page		page = BufferGetPage(newbuf);

	/* Caller should not call me on a non-WAL-logged relation */
	Assert(RelationNeedsWAL(reln));

	if (HeapTupleIsHeapOnly(newtup))
		info = XLOG_HEAP_HOT_UPDATE;
	else
		info = XLOG_HEAP_UPDATE;

	xlrec.target.node = reln->rd_node;
	xlrec.target.tid = from;
	xlrec.all_visible_cleared = all_visible_cleared;
	xlrec.newtid = newtup->t_self;
	xlrec.new_all_visible_cleared = new_all_visible_cleared;

	rdata[0].data = (char *) &xlrec;
	rdata[0].len = SizeOfHeapUpdate;
	rdata[0].buffer = InvalidBuffer;
	rdata[0].next = &(rdata[1]);

	rdata[1].data = NULL;
	rdata[1].len = 0;
	rdata[1].buffer = oldbuf;
	rdata[1].buffer_std = true;
	rdata[1].next = &(rdata[2]);

	xlhdr.t_infomask2 = newtup->t_data->t_infomask2;
	xlhdr.t_infomask = newtup->t_data->t_infomask;
	xlhdr.t_hoff = newtup->t_data->t_hoff;

	/*
	 * As with insert records, we need not store the rdata[2] segment if we
	 * decide to store the whole buffer instead.
	 */
	rdata[2].data = (char *) &xlhdr;
	rdata[2].len = SizeOfHeapHeader;
	rdata[2].buffer = newbuf;
	rdata[2].buffer_std = true;
	rdata[2].next = &(rdata[3]);

	/* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */
	rdata[3].data = (char *) newtup->t_data + offsetof(HeapTupleHeaderData, t_bits);
	rdata[3].len = newtup->t_len - offsetof(HeapTupleHeaderData, t_bits);
	rdata[3].buffer = newbuf;
	rdata[3].buffer_std = true;
	rdata[3].next = NULL;

	/* If the new tuple is the first and only tuple on its page... */
	if (ItemPointerGetOffsetNumber(&(newtup->t_self)) == FirstOffsetNumber &&
		PageGetMaxOffsetNumber(page) == FirstOffsetNumber)
	{
		info |= XLOG_HEAP_INIT_PAGE;
		rdata[2].buffer = rdata[3].buffer = InvalidBuffer;
	}

	recptr = XLogInsert(RM_HEAP_ID, info, rdata);

	return recptr;
}
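/*
 * Design note on the XLOG_HEAP_INIT_PAGE case above: when the new tuple
 * is the only tuple on a freshly used page, redo can simply reinitialize
 * that page, so there is never any point in emitting a full-page image of
 * the new buffer.  Setting rdata[2].buffer and rdata[3].buffer to
 * InvalidBuffer forces the tuple header and body into the record itself,
 * and heap_xlog_update() responds to the flag by calling PageInit()
 * before reinserting the tuple.
 */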
/*
 * Perform XLogInsert of a HEAP_NEWPAGE record to WAL.  Caller is responsible
 * for writing the page to disk after calling this routine.
 *
 * Note: all current callers build pages in private memory and write them
 * directly to smgr, rather than using bufmgr.  Therefore there is no need
 * to pass a buffer ID to XLogInsert, nor to perform MarkBufferDirty within
 * the critical section.
 *
 * Note: the NEWPAGE log record is used for both heaps and indexes, so do
 * not do anything that assumes we are touching a heap.
 */
XLogRecPtr
log_newpage(RelFileNode *rnode, ForkNumber forkNum, BlockNumber blkno,
			Page page)
{
	xl_heap_newpage xlrec;
	XLogRecPtr	recptr;
	XLogRecData rdata[2];

	/* NO ELOG(ERROR) from here till newpage op is logged */
	START_CRIT_SECTION();

	xlrec.node = *rnode;
	xlrec.forknum = forkNum;
	xlrec.blkno = blkno;

	rdata[0].data = (char *) &xlrec;
	rdata[0].len = SizeOfHeapNewpage;
	rdata[0].buffer = InvalidBuffer;
	rdata[0].next = &(rdata[1]);

	rdata[1].data = (char *) page;
	rdata[1].len = BLCKSZ;
	rdata[1].buffer = InvalidBuffer;
	rdata[1].next = NULL;

	recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_NEWPAGE, rdata);

	/*
	 * The page may be uninitialized.  If so, we can't set the LSN and TLI
	 * because that would corrupt the page.
	 */
	if (!PageIsNew(page))
	{
		PageSetLSN(page, recptr);
		PageSetTLI(page, ThisTimeLineID);
	}

	END_CRIT_SECTION();

	return recptr;
}
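/*
 * Illustrative caller pattern (a sketch; 'buf' and 'use_wal' are
 * hypothetical names): a bulk loader that builds pages in private memory
 * logs each page before handing it to smgr, then fsyncs the relation when
 * the build is done rather than relying on checkpoints of shared buffers.
 *
 *	if (use_wal)
 *		log_newpage(&rel->rd_node, MAIN_FORKNUM, blkno, (Page) buf);
 *	smgrextend(rel->rd_smgr, MAIN_FORKNUM, blkno, buf, true);
 *	...
 *	smgrimmedsync(rel->rd_smgr, MAIN_FORKNUM);
 */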
/*
 * Handles CLEANUP_INFO
 */
static void
heap_xlog_cleanup_info(XLogRecPtr lsn, XLogRecord *record)
{
	xl_heap_cleanup_info *xlrec = (xl_heap_cleanup_info *) XLogRecGetData(record);

	if (InHotStandby)
		ResolveRecoveryConflictWithSnapshot(xlrec->latestRemovedXid, xlrec->node);

	/*
	 * The actual operation is a no-op; the record type exists only so that
	 * conflict processing can occur before index vacuuming begins.  See
	 * vacuumlazy.c and the comments in btvacuumpage().
	 */
}
/*
 * Handles HEAP2_CLEAN record type
 */
static void
heap_xlog_clean(XLogRecPtr lsn, XLogRecord *record)
{
	xl_heap_clean *xlrec = (xl_heap_clean *) XLogRecGetData(record);
	Buffer		buffer;
	Page		page;
	OffsetNumber *end;
	OffsetNumber *redirected;
	OffsetNumber *nowdead;
	OffsetNumber *nowunused;
	int			nredirected;
	int			ndead;
	int			nunused;
	Size		freespace;

	/*
	 * We're about to remove tuples.  In Hot Standby mode, ensure that no
	 * queries are running for which the removed tuples are still visible.
	 *
	 * Not all HEAP2_CLEAN records remove tuples with xids, so we only want
	 * to conflict on the records that cause MVCC failures for user queries.
	 * If latestRemovedXid is invalid, skip conflict processing.
	 */
	if (InHotStandby && TransactionIdIsValid(xlrec->latestRemovedXid))
		ResolveRecoveryConflictWithSnapshot(xlrec->latestRemovedXid,
											xlrec->node);

	RestoreBkpBlocks(lsn, record, true);

	if (record->xl_info & XLR_BKP_BLOCK_1)
		return;

	buffer = XLogReadBufferExtended(xlrec->node, MAIN_FORKNUM, xlrec->block, RBM_NORMAL);
	if (!BufferIsValid(buffer))
		return;
	LockBufferForCleanup(buffer);
	page = (Page) BufferGetPage(buffer);

	if (XLByteLE(lsn, PageGetLSN(page)))
	{
		UnlockReleaseBuffer(buffer);
		return;
	}

	nredirected = xlrec->nredirected;
	ndead = xlrec->ndead;
	end = (OffsetNumber *) ((char *) xlrec + record->xl_len);
	redirected = (OffsetNumber *) ((char *) xlrec + SizeOfHeapClean);
	nowdead = redirected + (nredirected * 2);
	nowunused = nowdead + ndead;
	nunused = (end - nowunused);
	Assert(nunused >= 0);

	/* Update all item pointers per the record, and repair fragmentation */
	heap_page_prune_execute(buffer,
							redirected, nredirected,
							nowdead, ndead,
							nowunused, nunused);

	freespace = PageGetHeapFreeSpace(page);		/* needed to update FSM below */

	/*
	 * Note: we don't worry about updating the page's prunability hints.  At
	 * worst this will cause an extra prune cycle to occur soon.
	 */

	PageSetLSN(page, lsn);
	PageSetTLI(page, ThisTimeLineID);
	MarkBufferDirty(buffer);
	UnlockReleaseBuffer(buffer);

	/*
	 * Update the FSM as well.
	 *
	 * XXX: We don't get here if the page was restored from a full page
	 * image.  We don't bother to update the FSM in that case; it doesn't
	 * need to be totally accurate anyway.
	 */
	XLogRecordPageWithFreeSpace(xlrec->node, xlrec->block, freespace);
}
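/*
 * Worked example of the array recovery above (illustrative numbers): for
 * a record with nredirected = 2 and ndead = 1, the payload following
 * SizeOfHeapClean holds 2 * 2 = 4 redirection OffsetNumbers, then one
 * now-dead OffsetNumber; whatever remains before xl_len must be the
 * now-unused array, so its length never needs to be stored explicitly.
 */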
static void
heap_xlog_freeze(XLogRecPtr lsn, XLogRecord *record)
{
	xl_heap_freeze *xlrec = (xl_heap_freeze *) XLogRecGetData(record);
	TransactionId cutoff_xid = xlrec->cutoff_xid;
	Buffer		buffer;
	Page		page;

	/*
	 * In Hot Standby mode, ensure that no queries are running which still
	 * consider the frozen xids as running.
	 */
	if (InHotStandby)
		ResolveRecoveryConflictWithSnapshot(cutoff_xid, xlrec->node);

	RestoreBkpBlocks(lsn, record, false);

	if (record->xl_info & XLR_BKP_BLOCK_1)
		return;

	buffer = XLogReadBufferExtended(xlrec->node, MAIN_FORKNUM, xlrec->block, RBM_NORMAL);
	if (!BufferIsValid(buffer))
		return;
	LockBufferForCleanup(buffer);
	page = (Page) BufferGetPage(buffer);

	if (XLByteLE(lsn, PageGetLSN(page)))
	{
		UnlockReleaseBuffer(buffer);
		return;
	}

	if (record->xl_len > SizeOfHeapFreeze)
	{
		OffsetNumber *offsets;
		OffsetNumber *offsets_end;

		offsets = (OffsetNumber *) ((char *) xlrec + SizeOfHeapFreeze);
		offsets_end = (OffsetNumber *) ((char *) xlrec + record->xl_len);

		while (offsets < offsets_end)
		{
			/* offsets[] entries are one-based */
			ItemId		lp = PageGetItemId(page, *offsets);
			HeapTupleHeader tuple = (HeapTupleHeader) PageGetItem(page, lp);

			(void) heap_freeze_tuple(tuple, cutoff_xid, InvalidBuffer);
			offsets++;
		}
	}

	PageSetLSN(page, lsn);
	PageSetTLI(page, ThisTimeLineID);
	MarkBufferDirty(buffer);
	UnlockReleaseBuffer(buffer);
}
/*
 * Replay XLOG_HEAP2_VISIBLE record.
 *
 * The critical integrity requirement here is that we must never end up with
 * a situation where the visibility map bit is set, and the page-level
 * PD_ALL_VISIBLE bit is clear.  If that were to occur, then a subsequent
 * page modification would fail to clear the visibility map bit.
 */
static void
heap_xlog_visible(XLogRecPtr lsn, XLogRecord *record)
{
	xl_heap_visible *xlrec = (xl_heap_visible *) XLogRecGetData(record);
	Buffer		buffer;
	Page		page;

	/*
	 * Read the heap page, if it still exists.  If the heap file has been
	 * dropped or truncated later in recovery, this might fail.  In that
	 * case, there's no point in doing anything further, since the
	 * visibility map will have to be cleared out at the same time.
	 */
	buffer = XLogReadBufferExtended(xlrec->node, MAIN_FORKNUM, xlrec->block,
									RBM_NORMAL);
	if (!BufferIsValid(buffer))
		return;
	page = (Page) BufferGetPage(buffer);

	LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);

	/*
	 * We don't bump the LSN of the heap page when setting the visibility
	 * map bit, because that would generate an unworkable volume of
	 * full-page writes.  This exposes us to torn page hazards, but since
	 * we're not inspecting the existing page contents in any way, we don't
	 * care.
	 *
	 * However, all operations that clear the visibility map bit *do* bump
	 * the LSN, and those operations will only be replayed if the XLOG LSN
	 * follows the page LSN.  Thus, if the page LSN has advanced past our
	 * XLOG record's LSN, we mustn't mark the page all-visible, because
	 * the subsequent update won't be replayed to clear the flag.
	 */
	if (!XLByteLE(lsn, PageGetLSN(page)))
	{
		PageSetAllVisible(page);
		MarkBufferDirty(buffer);
	}

	/* Done with heap page. */
	UnlockReleaseBuffer(buffer);

	/*
	 * Even if we skipped the heap page update due to the LSN interlock, it's
	 * still safe to update the visibility map.  Any WAL record that clears
	 * the visibility map bit does so before checking the page LSN, so any
	 * bits that need to be cleared will still be cleared.
	 */
	if (record->xl_info & XLR_BKP_BLOCK_1)
		RestoreBkpBlocks(lsn, record, false);
	else
	{
		Relation	reln;
		Buffer		vmbuffer = InvalidBuffer;

		reln = CreateFakeRelcacheEntry(xlrec->node);
		visibilitymap_pin(reln, xlrec->block, &vmbuffer);

		/*
		 * Don't set the bit if replay has already passed this point.
		 *
		 * It might be safe to do this unconditionally; if replay has passed
		 * this point, we'll replay at least as far this time as we did
		 * before, and if this bit needs to be cleared, the record
		 * responsible for doing so will be replayed again and will clear
		 * it.  For right now, out of an abundance of conservatism, we use
		 * the same test here we did for the heap page; if this results in a
		 * dropped bit, no real harm is done; the next VACUUM will fix it.
		 */
		if (!XLByteLE(lsn, PageGetLSN(BufferGetPage(vmbuffer))))
			visibilitymap_set(reln, xlrec->block, lsn, vmbuffer);

		ReleaseBuffer(vmbuffer);
		FreeFakeRelcacheEntry(reln);
	}
}
static void
heap_xlog_newpage(XLogRecPtr lsn, XLogRecord *record)
{
	xl_heap_newpage *xlrec = (xl_heap_newpage *) XLogRecGetData(record);
	Buffer		buffer;
	Page		page;

	/*
	 * Note: the NEWPAGE log record is used for both heaps and indexes, so do
	 * not do anything that assumes we are touching a heap.
	 */
	buffer = XLogReadBufferExtended(xlrec->node, xlrec->forknum, xlrec->blkno,
									RBM_ZERO);
	Assert(BufferIsValid(buffer));
	LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
	page = (Page) BufferGetPage(buffer);

	Assert(record->xl_len == SizeOfHeapNewpage + BLCKSZ);
	memcpy(page, (char *) xlrec + SizeOfHeapNewpage, BLCKSZ);

	/*
	 * The page may be uninitialized.  If so, we can't set the LSN and TLI
	 * because that would corrupt the page.
	 */
	if (!PageIsNew(page))
	{
		PageSetLSN(page, lsn);
		PageSetTLI(page, ThisTimeLineID);
	}

	MarkBufferDirty(buffer);
	UnlockReleaseBuffer(buffer);
}
static void
heap_xlog_delete(XLogRecPtr lsn, XLogRecord *record)
{
	xl_heap_delete *xlrec = (xl_heap_delete *) XLogRecGetData(record);
	Buffer		buffer;
	Page		page;
	OffsetNumber offnum;
	ItemId		lp = NULL;
	HeapTupleHeader htup;
	BlockNumber blkno;

	blkno = ItemPointerGetBlockNumber(&(xlrec->target.tid));

	/*
	 * The visibility map may need to be fixed even if the heap page is
	 * already up-to-date.
	 */
	if (xlrec->all_visible_cleared)
	{
		Relation	reln = CreateFakeRelcacheEntry(xlrec->target.node);
		Buffer		vmbuffer = InvalidBuffer;

		visibilitymap_pin(reln, blkno, &vmbuffer);
		visibilitymap_clear(reln, blkno, vmbuffer);
		ReleaseBuffer(vmbuffer);
		FreeFakeRelcacheEntry(reln);
	}

	if (record->xl_info & XLR_BKP_BLOCK_1)
		return;

	buffer = XLogReadBuffer(xlrec->target.node, blkno, false);
	if (!BufferIsValid(buffer))
		return;
	page = (Page) BufferGetPage(buffer);

	if (XLByteLE(lsn, PageGetLSN(page)))		/* changes are applied */
	{
		UnlockReleaseBuffer(buffer);
		return;
	}

	offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
	if (PageGetMaxOffsetNumber(page) >= offnum)
		lp = PageGetItemId(page, offnum);

	if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
		elog(PANIC, "heap_delete_redo: invalid lp");

	htup = (HeapTupleHeader) PageGetItem(page, lp);

	htup->t_infomask &= ~(HEAP_XMAX_COMMITTED |
						  HEAP_XMAX_INVALID |
						  HEAP_XMAX_IS_MULTI |
						  HEAP_IS_LOCKED |
						  HEAP_MOVED);
	HeapTupleHeaderClearHotUpdated(htup);
	HeapTupleHeaderSetXmax(htup, record->xl_xid);
	HeapTupleHeaderSetCmax(htup, FirstCommandId, false);

	/* Mark the page as a candidate for pruning */
	PageSetPrunable(page, record->xl_xid);

	if (xlrec->all_visible_cleared)
		PageClearAllVisible(page);

	/* Make sure there is no forward chain link in t_ctid */
	htup->t_ctid = xlrec->target.tid;
	PageSetLSN(page, lsn);
	PageSetTLI(page, ThisTimeLineID);
	MarkBufferDirty(buffer);
	UnlockReleaseBuffer(buffer);
}
static void
heap_xlog_insert(XLogRecPtr lsn, XLogRecord *record)
{
	xl_heap_insert *xlrec = (xl_heap_insert *) XLogRecGetData(record);
	Buffer		buffer;
	Page		page;
	OffsetNumber offnum;
	struct
	{
		HeapTupleHeaderData hdr;
		char		data[MaxHeapTupleSize];
	}			tbuf;
	HeapTupleHeader htup;
	xl_heap_header xlhdr;
	uint32		newlen;
	Size		freespace;
	BlockNumber blkno;

	blkno = ItemPointerGetBlockNumber(&(xlrec->target.tid));

	/*
	 * The visibility map may need to be fixed even if the heap page is
	 * already up-to-date.
	 */
	if (xlrec->all_visible_cleared)
	{
		Relation	reln = CreateFakeRelcacheEntry(xlrec->target.node);
		Buffer		vmbuffer = InvalidBuffer;

		visibilitymap_pin(reln, blkno, &vmbuffer);
		visibilitymap_clear(reln, blkno, vmbuffer);
		ReleaseBuffer(vmbuffer);
		FreeFakeRelcacheEntry(reln);
	}

	if (record->xl_info & XLR_BKP_BLOCK_1)
		return;

	if (record->xl_info & XLOG_HEAP_INIT_PAGE)
	{
		buffer = XLogReadBuffer(xlrec->target.node, blkno, true);
		Assert(BufferIsValid(buffer));
		page = (Page) BufferGetPage(buffer);

		PageInit(page, BufferGetPageSize(buffer), 0);
	}
	else
	{
		buffer = XLogReadBuffer(xlrec->target.node, blkno, false);
		if (!BufferIsValid(buffer))
			return;
		page = (Page) BufferGetPage(buffer);

		if (XLByteLE(lsn, PageGetLSN(page)))	/* changes are applied */
		{
			UnlockReleaseBuffer(buffer);
			return;
		}
	}

	offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
	if (PageGetMaxOffsetNumber(page) + 1 < offnum)
		elog(PANIC, "heap_insert_redo: invalid max offset number");

	newlen = record->xl_len - SizeOfHeapInsert - SizeOfHeapHeader;
	Assert(newlen <= MaxHeapTupleSize);
	memcpy((char *) &xlhdr,
		   (char *) xlrec + SizeOfHeapInsert,
		   SizeOfHeapHeader);
	htup = &tbuf.hdr;
	MemSet((char *) htup, 0, sizeof(HeapTupleHeaderData));
	/* PG73FORMAT: get bitmap [+ padding] [+ oid] + data */
	memcpy((char *) htup + offsetof(HeapTupleHeaderData, t_bits),
		   (char *) xlrec + SizeOfHeapInsert + SizeOfHeapHeader,
		   newlen);
	newlen += offsetof(HeapTupleHeaderData, t_bits);
	htup->t_infomask2 = xlhdr.t_infomask2;
	htup->t_infomask = xlhdr.t_infomask;
	htup->t_hoff = xlhdr.t_hoff;
	HeapTupleHeaderSetXmin(htup, record->xl_xid);
	HeapTupleHeaderSetCmin(htup, FirstCommandId);
	htup->t_ctid = xlrec->target.tid;

	offnum = PageAddItem(page, (Item) htup, newlen, offnum, true, true);
	if (offnum == InvalidOffsetNumber)
		elog(PANIC, "heap_insert_redo: failed to add tuple");

	freespace = PageGetHeapFreeSpace(page);		/* needed to update FSM below */

	PageSetLSN(page, lsn);
	PageSetTLI(page, ThisTimeLineID);

	if (xlrec->all_visible_cleared)
		PageClearAllVisible(page);

	MarkBufferDirty(buffer);
	UnlockReleaseBuffer(buffer);

	/*
	 * If the page is running low on free space, update the FSM as well.
	 * Arbitrarily, our definition of "low" is less than 20%.  We can't do
	 * much better than that without knowing the fill-factor for the table.
	 *
	 * XXX: We don't get here if the page was restored from a full page
	 * image.  We don't bother to update the FSM in that case; it doesn't
	 * need to be totally accurate anyway.
	 */
	if (freespace < BLCKSZ / 5)
		XLogRecordPageWithFreeSpace(xlrec->target.node, blkno, freespace);
}
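/*
 * The insert record parsed above is laid out as follows (a sketch, not a
 * normative statement of the WAL format):
 *
 *	xl_heap_insert  (SizeOfHeapInsert bytes; the target tid etc.)
 *	xl_heap_header  (SizeOfHeapHeader bytes; infomasks and t_hoff)
 *	tuple data      (xl_len minus the two headers)
 *
 * The stored tuple body begins at t_bits; the fixed header fields before
 * the null bitmap are reconstructed from the record rather than logged,
 * which is why the copy above targets offsetof(HeapTupleHeaderData,
 * t_bits).
 */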
/*
 * Handles UPDATE and HOT_UPDATE
 */
static void
heap_xlog_update(XLogRecPtr lsn, XLogRecord *record, bool hot_update)
{
	xl_heap_update *xlrec = (xl_heap_update *) XLogRecGetData(record);
	Buffer		buffer;
	bool		samepage = (ItemPointerGetBlockNumber(&(xlrec->newtid)) ==
							ItemPointerGetBlockNumber(&(xlrec->target.tid)));
	Page		page;
	OffsetNumber offnum;
	ItemId		lp = NULL;
	HeapTupleHeader htup;
	struct
	{
		HeapTupleHeaderData hdr;
		char		data[MaxHeapTupleSize];
	}			tbuf;
	xl_heap_header xlhdr;
	int			hsize;
	uint32		newlen;
	Size		freespace;

	/*
	 * The visibility map may need to be fixed even if the heap page is
	 * already up-to-date.
	 */
	if (xlrec->all_visible_cleared)
	{
		Relation	reln = CreateFakeRelcacheEntry(xlrec->target.node);
		BlockNumber block = ItemPointerGetBlockNumber(&xlrec->target.tid);
		Buffer		vmbuffer = InvalidBuffer;

		visibilitymap_pin(reln, block, &vmbuffer);
		visibilitymap_clear(reln, block, vmbuffer);
		ReleaseBuffer(vmbuffer);
		FreeFakeRelcacheEntry(reln);
	}

	if (record->xl_info & XLR_BKP_BLOCK_1)
	{
		if (samepage)
			return;				/* backup block covered both changes */
		goto newt;
	}

	/* Deal with old tuple version */

	buffer = XLogReadBuffer(xlrec->target.node,
							ItemPointerGetBlockNumber(&(xlrec->target.tid)),
							false);
	if (!BufferIsValid(buffer))
		goto newt;
	page = (Page) BufferGetPage(buffer);

	if (XLByteLE(lsn, PageGetLSN(page)))		/* changes are applied */
	{
		UnlockReleaseBuffer(buffer);
		if (samepage)
			return;
		goto newt;
	}

	offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
	if (PageGetMaxOffsetNumber(page) >= offnum)
		lp = PageGetItemId(page, offnum);

	if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
		elog(PANIC, "heap_update_redo: invalid lp");

	htup = (HeapTupleHeader) PageGetItem(page, lp);

	htup->t_infomask &= ~(HEAP_XMAX_COMMITTED |
						  HEAP_XMAX_INVALID |
						  HEAP_XMAX_IS_MULTI |
						  HEAP_IS_LOCKED |
						  HEAP_MOVED);
	if (hot_update)
		HeapTupleHeaderSetHotUpdated(htup);
	else
		HeapTupleHeaderClearHotUpdated(htup);
	HeapTupleHeaderSetXmax(htup, record->xl_xid);
	HeapTupleHeaderSetCmax(htup, FirstCommandId, false);
	/* Set forward chain link in t_ctid */
	htup->t_ctid = xlrec->newtid;

	/* Mark the page as a candidate for pruning */
	PageSetPrunable(page, record->xl_xid);

	if (xlrec->all_visible_cleared)
		PageClearAllVisible(page);

	/*
	 * This test is ugly, but necessary to avoid thinking that the insert
	 * change is already applied.
	 */
	if (samepage)
		goto newsame;

	PageSetLSN(page, lsn);
	PageSetTLI(page, ThisTimeLineID);
	MarkBufferDirty(buffer);
	UnlockReleaseBuffer(buffer);

	/* Deal with new tuple */

newt:;

	/*
	 * The visibility map may need to be fixed even if the heap page is
	 * already up-to-date.
	 */
	if (xlrec->new_all_visible_cleared)
	{
		Relation	reln = CreateFakeRelcacheEntry(xlrec->target.node);
		BlockNumber block = ItemPointerGetBlockNumber(&xlrec->newtid);
		Buffer		vmbuffer = InvalidBuffer;

		visibilitymap_pin(reln, block, &vmbuffer);
		visibilitymap_clear(reln, block, vmbuffer);
		ReleaseBuffer(vmbuffer);
		FreeFakeRelcacheEntry(reln);
	}

	if (record->xl_info & XLR_BKP_BLOCK_2)
		return;

	if (record->xl_info & XLOG_HEAP_INIT_PAGE)
	{
		buffer = XLogReadBuffer(xlrec->target.node,
								ItemPointerGetBlockNumber(&(xlrec->newtid)),
								true);
		Assert(BufferIsValid(buffer));
		page = (Page) BufferGetPage(buffer);

		PageInit(page, BufferGetPageSize(buffer), 0);
	}
	else
	{
		buffer = XLogReadBuffer(xlrec->target.node,
								ItemPointerGetBlockNumber(&(xlrec->newtid)),
								false);
		if (!BufferIsValid(buffer))
			return;
		page = (Page) BufferGetPage(buffer);

		if (XLByteLE(lsn, PageGetLSN(page)))	/* changes are applied */
		{
			UnlockReleaseBuffer(buffer);
			return;
		}
	}

newsame:;

	offnum = ItemPointerGetOffsetNumber(&(xlrec->newtid));
	if (PageGetMaxOffsetNumber(page) + 1 < offnum)
		elog(PANIC, "heap_update_redo: invalid max offset number");

	hsize = SizeOfHeapUpdate + SizeOfHeapHeader;

	newlen = record->xl_len - hsize;
	Assert(newlen <= MaxHeapTupleSize);
	memcpy((char *) &xlhdr,
		   (char *) xlrec + SizeOfHeapUpdate,
		   SizeOfHeapHeader);
	htup = &tbuf.hdr;
	MemSet((char *) htup, 0, sizeof(HeapTupleHeaderData));
	/* PG73FORMAT: get bitmap [+ padding] [+ oid] + data */
	memcpy((char *) htup + offsetof(HeapTupleHeaderData, t_bits),
		   (char *) xlrec + hsize,
		   newlen);
	newlen += offsetof(HeapTupleHeaderData, t_bits);
	htup->t_infomask2 = xlhdr.t_infomask2;
	htup->t_infomask = xlhdr.t_infomask;
	htup->t_hoff = xlhdr.t_hoff;

	HeapTupleHeaderSetXmin(htup, record->xl_xid);
	HeapTupleHeaderSetCmin(htup, FirstCommandId);
	/* Make sure there is no forward chain link in t_ctid */
	htup->t_ctid = xlrec->newtid;

	offnum = PageAddItem(page, (Item) htup, newlen, offnum, true, true);
	if (offnum == InvalidOffsetNumber)
		elog(PANIC, "heap_update_redo: failed to add tuple");

	if (xlrec->new_all_visible_cleared)
		PageClearAllVisible(page);

	freespace = PageGetHeapFreeSpace(page);		/* needed to update FSM below */

	PageSetLSN(page, lsn);
	PageSetTLI(page, ThisTimeLineID);
	MarkBufferDirty(buffer);
	UnlockReleaseBuffer(buffer);

	/*
	 * If the page is running low on free space, update the FSM as well.
	 * Arbitrarily, our definition of "low" is less than 20%.  We can't do
	 * much better than that without knowing the fill-factor for the table.
	 *
	 * However, don't update the FSM on HOT updates, because after crash
	 * recovery, either the old or the new tuple will certainly be dead and
	 * prunable.  After pruning, the page will have roughly as much free
	 * space as it did before the update, assuming the new tuple is about
	 * the same size as the old one.
	 *
	 * XXX: We don't get here if the page was restored from a full page
	 * image.  We don't bother to update the FSM in that case; it doesn't
	 * need to be totally accurate anyway.
	 */
	if (!hot_update && freespace < BLCKSZ / 5)
		XLogRecordPageWithFreeSpace(xlrec->target.node,
									ItemPointerGetBlockNumber(&(xlrec->newtid)),
									freespace);
}
static void
heap_xlog_lock(XLogRecPtr lsn, XLogRecord *record)
{
	xl_heap_lock *xlrec = (xl_heap_lock *) XLogRecGetData(record);
	Buffer		buffer;
	Page		page;
	OffsetNumber offnum;
	ItemId		lp = NULL;
	HeapTupleHeader htup;

	if (record->xl_info & XLR_BKP_BLOCK_1)
		return;

	buffer = XLogReadBuffer(xlrec->target.node,
							ItemPointerGetBlockNumber(&(xlrec->target.tid)),
							false);
	if (!BufferIsValid(buffer))
		return;
	page = (Page) BufferGetPage(buffer);

	if (XLByteLE(lsn, PageGetLSN(page)))		/* changes are applied */
	{
		UnlockReleaseBuffer(buffer);
		return;
	}

	offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
	if (PageGetMaxOffsetNumber(page) >= offnum)
		lp = PageGetItemId(page, offnum);

	if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
		elog(PANIC, "heap_lock_redo: invalid lp");

	htup = (HeapTupleHeader) PageGetItem(page, lp);

	htup->t_infomask &= ~(HEAP_XMAX_COMMITTED |
						  HEAP_XMAX_INVALID |
						  HEAP_XMAX_IS_MULTI |
						  HEAP_IS_LOCKED |
						  HEAP_MOVED);
	if (xlrec->xid_is_mxact)
		htup->t_infomask |= HEAP_XMAX_IS_MULTI;
	if (xlrec->shared_lock)
		htup->t_infomask |= HEAP_XMAX_SHARED_LOCK;
	else
		htup->t_infomask |= HEAP_XMAX_EXCL_LOCK;
	HeapTupleHeaderClearHotUpdated(htup);
	HeapTupleHeaderSetXmax(htup, xlrec->locking_xid);
	HeapTupleHeaderSetCmax(htup, FirstCommandId, false);
	/* Make sure there is no forward chain link in t_ctid */
	htup->t_ctid = xlrec->target.tid;
	PageSetLSN(page, lsn);
	PageSetTLI(page, ThisTimeLineID);
	MarkBufferDirty(buffer);
	UnlockReleaseBuffer(buffer);
}
static void
heap_xlog_inplace(XLogRecPtr lsn, XLogRecord *record)
{
	xl_heap_inplace *xlrec = (xl_heap_inplace *) XLogRecGetData(record);
	Buffer		buffer;
	Page		page;
	OffsetNumber offnum;
	ItemId		lp = NULL;
	HeapTupleHeader htup;
	uint32		oldlen;
	uint32		newlen;

	if (record->xl_info & XLR_BKP_BLOCK_1)
		return;

	buffer = XLogReadBuffer(xlrec->target.node,
							ItemPointerGetBlockNumber(&(xlrec->target.tid)),
							false);
	if (!BufferIsValid(buffer))
		return;
	page = (Page) BufferGetPage(buffer);

	if (XLByteLE(lsn, PageGetLSN(page)))		/* changes are applied */
	{
		UnlockReleaseBuffer(buffer);
		return;
	}

	offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
	if (PageGetMaxOffsetNumber(page) >= offnum)
		lp = PageGetItemId(page, offnum);

	if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
		elog(PANIC, "heap_inplace_redo: invalid lp");

	htup = (HeapTupleHeader) PageGetItem(page, lp);

	oldlen = ItemIdGetLength(lp) - htup->t_hoff;
	newlen = record->xl_len - SizeOfHeapInplace;
	if (oldlen != newlen)
		elog(PANIC, "heap_inplace_redo: wrong tuple length");

	memcpy((char *) htup + htup->t_hoff,
		   (char *) xlrec + SizeOfHeapInplace,
		   newlen);

	PageSetLSN(page, lsn);
	PageSetTLI(page, ThisTimeLineID);
	MarkBufferDirty(buffer);
	UnlockReleaseBuffer(buffer);
}
void
heap_redo(XLogRecPtr lsn, XLogRecord *record)
{
	uint8		info = record->xl_info & ~XLR_INFO_MASK;

	/*
	 * These operations don't overwrite MVCC data, so no conflict processing
	 * is required.  The ones in the heap2 rmgr do.
	 */
	RestoreBkpBlocks(lsn, record, false);

	switch (info & XLOG_HEAP_OPMASK)
	{
		case XLOG_HEAP_INSERT:
			heap_xlog_insert(lsn, record);
			break;
		case XLOG_HEAP_DELETE:
			heap_xlog_delete(lsn, record);
			break;
		case XLOG_HEAP_UPDATE:
			heap_xlog_update(lsn, record, false);
			break;
		case XLOG_HEAP_HOT_UPDATE:
			heap_xlog_update(lsn, record, true);
			break;
		case XLOG_HEAP_NEWPAGE:
			heap_xlog_newpage(lsn, record);
			break;
		case XLOG_HEAP_LOCK:
			heap_xlog_lock(lsn, record);
			break;
		case XLOG_HEAP_INPLACE:
			heap_xlog_inplace(lsn, record);
			break;
		default:
			elog(PANIC, "heap_redo: unknown op code %u", info);
	}
}
void
heap2_redo(XLogRecPtr lsn, XLogRecord *record)
{
	uint8		info = record->xl_info & ~XLR_INFO_MASK;

	/*
	 * Note that RestoreBkpBlocks() is called after conflict processing
	 * within each record type's handling function.
	 */
	switch (info & XLOG_HEAP_OPMASK)
	{
		case XLOG_HEAP2_FREEZE:
			heap_xlog_freeze(lsn, record);
			break;
		case XLOG_HEAP2_CLEAN:
			heap_xlog_clean(lsn, record);
			break;
		case XLOG_HEAP2_CLEANUP_INFO:
			heap_xlog_cleanup_info(lsn, record);
			break;
		case XLOG_HEAP2_VISIBLE:
			heap_xlog_visible(lsn, record);
			break;
		default:
			elog(PANIC, "heap2_redo: unknown op code %u", info);
	}
}
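/*
 * For context (a sketch of how these entry points are reached; the rmgr
 * table itself lives in rmgr.c, not here): recovery dispatches each WAL
 * record through RmgrTable[record->xl_rmid].rm_redo, so the heap entries
 * look roughly like
 *
 *	{"Heap2", heap2_redo, heap2_desc, NULL, NULL, NULL},
 *	{"Heap",  heap_redo,  heap_desc,  NULL, NULL, NULL},
 *
 * with the remaining slots being the optional startup/cleanup/
 * restartpoint callbacks, which the heap rmgrs do not need.
 */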
static void
out_target(StringInfo buf, xl_heaptid *target)
{
	appendStringInfo(buf, "rel %u/%u/%u; tid %u/%u",
					 target->node.spcNode, target->node.dbNode, target->node.relNode,
					 ItemPointerGetBlockNumber(&(target->tid)),
					 ItemPointerGetOffsetNumber(&(target->tid)));
}
void
heap_desc(StringInfo buf, uint8 xl_info, char *rec)
{
	uint8		info = xl_info & ~XLR_INFO_MASK;

	info &= XLOG_HEAP_OPMASK;
	if (info == XLOG_HEAP_INSERT)
	{
		xl_heap_insert *xlrec = (xl_heap_insert *) rec;

		if (xl_info & XLOG_HEAP_INIT_PAGE)
			appendStringInfo(buf, "insert(init): ");
		else
			appendStringInfo(buf, "insert: ");
		out_target(buf, &(xlrec->target));
	}
	else if (info == XLOG_HEAP_DELETE)
	{
		xl_heap_delete *xlrec = (xl_heap_delete *) rec;

		appendStringInfo(buf, "delete: ");
		out_target(buf, &(xlrec->target));
	}
	else if (info == XLOG_HEAP_UPDATE)
	{
		xl_heap_update *xlrec = (xl_heap_update *) rec;

		if (xl_info & XLOG_HEAP_INIT_PAGE)
			appendStringInfo(buf, "update(init): ");
		else
			appendStringInfo(buf, "update: ");
		out_target(buf, &(xlrec->target));
		appendStringInfo(buf, "; new %u/%u",
						 ItemPointerGetBlockNumber(&(xlrec->newtid)),
						 ItemPointerGetOffsetNumber(&(xlrec->newtid)));
	}
	else if (info == XLOG_HEAP_HOT_UPDATE)
	{
		xl_heap_update *xlrec = (xl_heap_update *) rec;

		if (xl_info & XLOG_HEAP_INIT_PAGE)		/* can this case happen? */
			appendStringInfo(buf, "hot_update(init): ");
		else
			appendStringInfo(buf, "hot_update: ");
		out_target(buf, &(xlrec->target));
		appendStringInfo(buf, "; new %u/%u",
						 ItemPointerGetBlockNumber(&(xlrec->newtid)),
						 ItemPointerGetOffsetNumber(&(xlrec->newtid)));
	}
	else if (info == XLOG_HEAP_NEWPAGE)
	{
		xl_heap_newpage *xlrec = (xl_heap_newpage *) rec;

		appendStringInfo(buf, "newpage: rel %u/%u/%u; fork %u, blk %u",
						 xlrec->node.spcNode, xlrec->node.dbNode,
						 xlrec->node.relNode, xlrec->forknum,
						 xlrec->blkno);
	}
	else if (info == XLOG_HEAP_LOCK)
	{
		xl_heap_lock *xlrec = (xl_heap_lock *) rec;

		if (xlrec->shared_lock)
			appendStringInfo(buf, "shared_lock: ");
		else
			appendStringInfo(buf, "exclusive_lock: ");
		if (xlrec->xid_is_mxact)
			appendStringInfo(buf, "mxid ");
		else
			appendStringInfo(buf, "xid ");
		appendStringInfo(buf, "%u ", xlrec->locking_xid);
		out_target(buf, &(xlrec->target));
	}
	else if (info == XLOG_HEAP_INPLACE)
	{
		xl_heap_inplace *xlrec = (xl_heap_inplace *) rec;

		appendStringInfo(buf, "inplace: ");
		out_target(buf, &(xlrec->target));
	}
	else
		appendStringInfo(buf, "UNKNOWN");
}
void
heap2_desc(StringInfo buf, uint8 xl_info, char *rec)
{
	uint8		info = xl_info & ~XLR_INFO_MASK;

	info &= XLOG_HEAP_OPMASK;
	if (info == XLOG_HEAP2_FREEZE)
	{
		xl_heap_freeze *xlrec = (xl_heap_freeze *) rec;

		appendStringInfo(buf, "freeze: rel %u/%u/%u; blk %u; cutoff %u",
						 xlrec->node.spcNode, xlrec->node.dbNode,
						 xlrec->node.relNode, xlrec->block,
						 xlrec->cutoff_xid);
	}
	else if (info == XLOG_HEAP2_CLEAN)
	{
		xl_heap_clean *xlrec = (xl_heap_clean *) rec;

		appendStringInfo(buf, "clean: rel %u/%u/%u; blk %u remxid %u",
						 xlrec->node.spcNode, xlrec->node.dbNode,
						 xlrec->node.relNode, xlrec->block,
						 xlrec->latestRemovedXid);
	}
	else if (info == XLOG_HEAP2_CLEANUP_INFO)
	{
		xl_heap_cleanup_info *xlrec = (xl_heap_cleanup_info *) rec;

		appendStringInfo(buf, "cleanup info: remxid %u",
						 xlrec->latestRemovedXid);
	}
	else if (info == XLOG_HEAP2_VISIBLE)
	{
		xl_heap_visible *xlrec = (xl_heap_visible *) rec;

		appendStringInfo(buf, "visible: rel %u/%u/%u; blk %u",
						 xlrec->node.spcNode, xlrec->node.dbNode,
						 xlrec->node.relNode, xlrec->block);
	}
	else
		appendStringInfo(buf, "UNKNOWN");
}
/*
 * heap_sync		- sync a heap, for use when no WAL has been written
 *
 * This forces the heap contents (including TOAST heap if any) down to disk.
 * If we skipped using WAL, and WAL is otherwise needed, we must force the
 * relation down to disk before it's safe to commit the transaction.  This
 * requires writing out any dirty buffers and then doing a forced fsync.
 *
 * Indexes are not touched.  (Currently, index operations associated with
 * the commands that use this are WAL-logged and so do not need fsync.
 * That behavior might change someday, but in any case it's likely that
 * any fsync decisions required would be per-index and hence not appropriate
 * to be done here.)
 */
void
heap_sync(Relation rel)
{
	/* non-WAL-logged tables never need fsync */
	if (!RelationNeedsWAL(rel))
		return;

	/* main heap */
	FlushRelationBuffers(rel);
	/* FlushRelationBuffers will have opened rd_smgr */
	smgrimmedsync(rel->rd_smgr, MAIN_FORKNUM);

	/* FSM is not critical, don't bother syncing it */

	/* toast heap, if any */
	if (OidIsValid(rel->rd_rel->reltoastrelid))
	{
		Relation	toastrel;

		toastrel = heap_open(rel->rd_rel->reltoastrelid, AccessShareLock);
		FlushRelationBuffers(toastrel);
		smgrimmedsync(toastrel->rd_smgr, MAIN_FORKNUM);
		heap_close(toastrel, AccessShareLock);
	}
}
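/*
 * Illustrative usage (a sketch, not a caller in this file): a bulk-load
 * path that elides WAL for a relation created in the same transaction
 * must finish with heap_sync() before commit.  'use_wal' and 'options'
 * are hypothetical names; COPY follows roughly this pattern.
 *
 *	if (!use_wal)
 *		options |= HEAP_INSERT_SKIP_WAL;
 *	...
 *	heap_insert(rel, tuple, mycid, options, bistate);
 *	...
 *	if (!use_wal)
 *		heap_sync(rel);
 */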