/*-------------------------------------------------------------------------
 *
 * visibilitymap.c
 *	  bitmap for tracking visibility of heap tuples
 *
 * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  $PostgreSQL: pgsql/src/backend/access/heap/visibilitymap.c,v 1.8 2010/02/09 21:43:29 tgl Exp $
 *
 * INTERFACE ROUTINES
 *		visibilitymap_clear - clear a bit in the visibility map
 *		visibilitymap_pin	- pin a map page for setting a bit
 *		visibilitymap_set	- set a bit in a previously pinned page
 *		visibilitymap_test	- test if a bit is set
 *
 * NOTES
 *
 * The visibility map is a bitmap with one bit per heap page. A set bit means
 * that all tuples on the page are known visible to all transactions, and
 * therefore the page doesn't need to be vacuumed. The map is conservative in
 * the sense that we make sure that whenever a bit is set, we know the
 * condition is true, but if a bit is not set, it might or might not be true.
 *
 * There's no explicit WAL logging in the functions in this file. The callers
 * must make sure that whenever a bit is cleared, the bit is cleared on WAL
 * replay of the updating operation as well. Setting bits during recovery
 * isn't necessary for correctness.
 *
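 * As an illustrative sketch (added here, not part of the original comment):
 * the WAL records for heap changes carry an all_visible_cleared flag, and
 * the redo routines clear the map bit when replaying them, along these
 * lines (field names as in the 8.4-era xl_heap_insert record):
 *
 *		if (xlrec->all_visible_cleared)
 *		{
 *			Relation	reln = CreateFakeRelcacheEntry(xlrec->target.node);
 *
 *			visibilitymap_clear(reln,
 *							ItemPointerGetBlockNumber(&(xlrec->target.tid)));
 *			FreeFakeRelcacheEntry(reln);
 *		}
 *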
 * Currently, the visibility map is only used as a hint, to speed up VACUUM.
 * A corrupted visibility map won't cause data corruption, although it can
 * make VACUUM skip pages that need vacuuming, until the next anti-wraparound
 * vacuum. The visibility map is not used for anti-wraparound vacuums, because
 * an anti-wraparound vacuum needs to freeze tuples and observe the latest xid
 * present in the table, even on pages that don't have any dead tuples.
 *
 * Although the visibility map is just a hint at the moment, the PD_ALL_VISIBLE
 * flag on heap pages *must* be correct, because it is used to skip visibility
 * tests in sequential scans.
 *
 * LOCKING
 *
 * In heapam.c, whenever a page is modified so that not all tuples on the
 * page are visible to everyone anymore, the corresponding bit in the
 * visibility map is cleared. The bit in the visibility map is cleared
 * after releasing the lock on the heap page, to avoid holding the lock
 * over possible I/O to read in the visibility map page.
 *
 * To set a bit, you need to hold a lock on the heap page. That prevents
 * the race condition where VACUUM sees that all tuples on the page are
 * visible to everyone, but another backend modifies the page before VACUUM
 * sets the bit in the visibility map.
 *
 * When a bit is set, the LSN of the visibility map page is updated to make
 * sure that the visibility map update doesn't get written to disk before the
 * WAL record of the changes that made it possible to set the bit is flushed.
 * But when a bit is cleared, we don't have to do that because it's always
 * safe to clear a bit in the map from a correctness point of view.
 *
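 * To illustrate (an added example, not from the original comment): if the
 * WAL record of the change that allowed the bit to be set has LSN
 * 0/12345678, the heap page's LSN is at least that, and visibilitymap_set
 * advances the map page's LSN to it. The buffer manager flushes WAL up
 * through a page's LSN before writing the page out, so the set bit can
 * never reach disk ahead of the WAL record that justifies it. A spuriously
 * clear bit, by contrast, merely costs an extra visibility check later.
 *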
 * TODO
 *
 * It would be nice to use the visibility map to skip visibility checks in
 * index scans.
 *
 * Currently, the visibility map is not 100% correct all the time.
 * During updates, the bit in the visibility map is cleared after releasing
 * the lock on the heap page. During the window between releasing the lock
 * and clearing the bit in the visibility map, the bit in the visibility map
 * is set, but the new insertion or deletion is not yet visible to other
 * backends.
 *
 * That might actually be OK for index scans, though. The newly inserted
 * tuple wouldn't have an index pointer yet, so all tuples reachable from an
 * index would still be visible to all other backends, and deletions wouldn't
 * be visible to other backends yet. (But HOT breaks that argument, no?)
 *
 * There's another hole in the way the PD_ALL_VISIBLE flag is set. When
 * vacuum observes that all tuples are visible to all, it sets the flag on
 * the heap page, and also sets the bit in the visibility map. If we then
 * crash, and only the visibility map page was flushed to disk, we'll have
 * a bit set in the visibility map, but the corresponding flag on the heap
 * page is not set. If the heap page is then updated, the updater won't
 * know to clear the bit in the visibility map. (Isn't that prevented by
 * the LSN interlock?)
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/visibilitymap.h"
#include "storage/bufmgr.h"
#include "storage/bufpage.h"
#include "storage/lmgr.h"
#include "storage/smgr.h"

/*#define TRACE_VISIBILITYMAP */

/*
 * Size of the bitmap on each visibility map page, in bytes. There are no
 * extra headers, so the whole page minus the standard page header is
 * used for the bitmap.
 */
#define MAPSIZE (BLCKSZ - MAXALIGN(SizeOfPageHeaderData))

/* Number of bits allocated for each heap block. */
#define BITS_PER_HEAPBLOCK 1

/* Number of heap blocks we can represent in one byte. */
#define HEAPBLOCKS_PER_BYTE 8

/* Number of heap blocks we can represent in one visibility map page. */
#define HEAPBLOCKS_PER_PAGE (MAPSIZE * HEAPBLOCKS_PER_BYTE)

/* Mapping from heap block number to the right bit in the visibility map */
#define HEAPBLK_TO_MAPBLOCK(x) ((x) / HEAPBLOCKS_PER_PAGE)
#define HEAPBLK_TO_MAPBYTE(x) (((x) % HEAPBLOCKS_PER_PAGE) / HEAPBLOCKS_PER_BYTE)
#define HEAPBLK_TO_MAPBIT(x) ((x) % HEAPBLOCKS_PER_BYTE)

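/*
 * Worked example (added; assumes the default BLCKSZ of 8192 and the usual
 * 24-byte page header): MAPSIZE = 8192 - MAXALIGN(24) = 8168 bytes, so
 * HEAPBLOCKS_PER_PAGE = 8168 * 8 = 65344; one map page covers 65344 heap
 * blocks, about 510 MB of heap. Heap block 70000 then maps to
 * HEAPBLK_TO_MAPBLOCK = 70000 / 65344 = 1, HEAPBLK_TO_MAPBYTE =
 * (70000 % 65344) / 8 = 582, and HEAPBLK_TO_MAPBIT = 70000 % 8 = 0.
 */
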
/* prototypes for internal routines */
static Buffer vm_readbuf(Relation rel, BlockNumber blkno, bool extend);
static void vm_extend(Relation rel, BlockNumber nvmblocks);

/*
 *	visibilitymap_clear - clear a bit in visibility map
 *
 * Clear a bit in the visibility map, marking that not all tuples are
 * visible to all transactions anymore.
 */
void
visibilitymap_clear(Relation rel, BlockNumber heapBlk)
{
	BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
	int			mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
	int			mapBit = HEAPBLK_TO_MAPBIT(heapBlk);
	uint8		mask = 1 << mapBit;
	Buffer		mapBuffer;
	char	   *map;

#ifdef TRACE_VISIBILITYMAP
	elog(DEBUG1, "vm_clear %s %d", RelationGetRelationName(rel), heapBlk);
#endif

	mapBuffer = vm_readbuf(rel, mapBlock, false);
	if (!BufferIsValid(mapBuffer))
		return;					/* nothing to do */

	LockBuffer(mapBuffer, BUFFER_LOCK_EXCLUSIVE);
	map = PageGetContents(BufferGetPage(mapBuffer));

	if (map[mapByte] & mask)
	{
		map[mapByte] &= ~mask;

		MarkBufferDirty(mapBuffer);
	}

	UnlockReleaseBuffer(mapBuffer);
}

/*
 *	visibilitymap_pin - pin a map page for setting a bit
 *
 * Setting a bit in the visibility map is a two-phase operation. First, call
 * visibilitymap_pin, to pin the visibility map page containing the bit for
 * the heap page. Because that can require I/O to read the map page, you
 * shouldn't hold a lock on the heap page while doing that. Then, call
 * visibilitymap_set to actually set the bit.
 *
 * On entry, *buf should be InvalidBuffer or a valid buffer returned by
 * an earlier call to visibilitymap_pin or visibilitymap_test on the same
 * relation. On return, *buf is a valid buffer with the map page containing
 * the bit for heapBlk.
 *
 * If the page doesn't exist in the map file yet, it is extended.
 */
void
visibilitymap_pin(Relation rel, BlockNumber heapBlk, Buffer *buf)
{
	BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);

	/* Reuse the old pinned buffer if possible */
	if (BufferIsValid(*buf))
	{
		if (BufferGetBlockNumber(*buf) == mapBlock)
			return;

		ReleaseBuffer(*buf);
	}
	*buf = vm_readbuf(rel, mapBlock, true);
}

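/*
 * Caller's-eye sketch of the two-phase protocol (an added example, not part
 * of the original file; "heapBuf", "heapPage", and the all-visible check are
 * placeholders for what a real caller such as VACUUM does):
 *
 *		Buffer		vmbuf = InvalidBuffer;
 *
 *		visibilitymap_pin(rel, heapBlk, &vmbuf);  -- possible I/O, no lock held
 *		LockBuffer(heapBuf, BUFFER_LOCK_EXCLUSIVE);
 *		if (... all tuples on heapBlk are visible to everyone ...)
 *			visibilitymap_set(rel, heapBlk, PageGetLSN(heapPage), &vmbuf);
 *		LockBuffer(heapBuf, BUFFER_LOCK_UNLOCK);
 *		if (BufferIsValid(vmbuf))
 *			ReleaseBuffer(vmbuf);
 */
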
/*
 *	visibilitymap_set - set a bit on a previously pinned page
 *
 * recptr is the LSN of the heap page. The LSN of the visibility map page is
 * advanced to that, to make sure that the visibility map doesn't get flushed
 * to disk before the update to the heap page that made all tuples visible.
 *
 * This is an opportunistic function. It does nothing, unless *buf
 * contains the bit for heapBlk. Call visibilitymap_pin first to pin
 * the right map page. This function doesn't do any I/O.
 */
void
visibilitymap_set(Relation rel, BlockNumber heapBlk, XLogRecPtr recptr,
				  Buffer *buf)
{
	BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
	uint32		mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
	uint8		mapBit = HEAPBLK_TO_MAPBIT(heapBlk);
	Page		page;
	char	   *map;

#ifdef TRACE_VISIBILITYMAP
	elog(DEBUG1, "vm_set %s %d", RelationGetRelationName(rel), heapBlk);
#endif

	/* Check that we have the right page pinned */
	if (!BufferIsValid(*buf) || BufferGetBlockNumber(*buf) != mapBlock)
		return;

	page = BufferGetPage(*buf);
	map = PageGetContents(page);
	LockBuffer(*buf, BUFFER_LOCK_EXCLUSIVE);

	if (!(map[mapByte] & (1 << mapBit)))
	{
		map[mapByte] |= (1 << mapBit);

		if (XLByteLT(PageGetLSN(page), recptr))
			PageSetLSN(page, recptr);
		PageSetTLI(page, ThisTimeLineID);
		MarkBufferDirty(*buf);
	}

	LockBuffer(*buf, BUFFER_LOCK_UNLOCK);
}

/*
 *	visibilitymap_test - test if a bit is set
 *
 * Are all tuples on heapBlk visible to all, according to the visibility map?
 *
 * On entry, *buf should be InvalidBuffer or a valid buffer returned by an
 * earlier call to visibilitymap_pin or visibilitymap_test on the same
 * relation. On return, *buf is a valid buffer with the map page containing
 * the bit for heapBlk, or InvalidBuffer. The caller is responsible for
 * releasing *buf after it's done testing and setting bits.
 */
bool
visibilitymap_test(Relation rel, BlockNumber heapBlk, Buffer *buf)
{
	BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
	uint32		mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
	uint8		mapBit = HEAPBLK_TO_MAPBIT(heapBlk);
	bool		result;
	char	   *map;

#ifdef TRACE_VISIBILITYMAP
	elog(DEBUG1, "vm_test %s %d", RelationGetRelationName(rel), heapBlk);
#endif

	/* Reuse the old pinned buffer if possible */
	if (BufferIsValid(*buf))
	{
		if (BufferGetBlockNumber(*buf) != mapBlock)
		{
			ReleaseBuffer(*buf);
			*buf = InvalidBuffer;
		}
	}

	if (!BufferIsValid(*buf))
	{
		*buf = vm_readbuf(rel, mapBlock, false);
		if (!BufferIsValid(*buf))
			return false;
	}

	map = PageGetContents(BufferGetPage(*buf));

	/*
	 * We don't need to lock the page, as we're only looking at a single bit.
	 */
	result = (map[mapByte] & (1 << mapBit)) ? true : false;

	return result;
}

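/*
 * Usage sketch (an added example, not part of the original file): because
 * *buf is carried across calls, a pass over consecutive heap blocks reads
 * each map page only once; "nblocks" is a placeholder.
 *
 *		Buffer		vmbuf = InvalidBuffer;
 *		BlockNumber blk;
 *
 *		for (blk = 0; blk < nblocks; blk++)
 *		{
 *			if (visibilitymap_test(rel, blk, &vmbuf))
 *				continue;		-- all-visible; e.g. VACUUM skips the block
 *			-- ... process block blk ...
 *		}
 *		if (BufferIsValid(vmbuf))
 *			ReleaseBuffer(vmbuf);
 */
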
/*
 *	visibilitymap_truncate - truncate the visibility map
 *
 * The caller must hold AccessExclusiveLock on the relation, to ensure that
 * other backends receive the smgr invalidation event that this function sends
 * before they access the VM again.
 *
 * nheapblocks is the new size of the heap.
 */
void
visibilitymap_truncate(Relation rel, BlockNumber nheapblocks)
{
	BlockNumber newnblocks;

	/* last remaining block, byte, and bit */
	BlockNumber truncBlock = HEAPBLK_TO_MAPBLOCK(nheapblocks);
	uint32		truncByte = HEAPBLK_TO_MAPBYTE(nheapblocks);
	uint8		truncBit = HEAPBLK_TO_MAPBIT(nheapblocks);

#ifdef TRACE_VISIBILITYMAP
	elog(DEBUG1, "vm_truncate %s %d", RelationGetRelationName(rel), nheapblocks);
#endif

	RelationOpenSmgr(rel);

	/*
	 * If no visibility map has been created yet for this relation, there's
	 * nothing to truncate.
	 */
	if (!smgrexists(rel->rd_smgr, VISIBILITYMAP_FORKNUM))
		return;

	/*
	 * Unless the new size is exactly at a visibility map page boundary, the
	 * tail bits in the last remaining map page, representing truncated heap
	 * blocks, need to be cleared. This is not only tidy, but also necessary
	 * because we don't get a chance to clear the bits if the heap is extended
	 * again.
	 */
	if (truncByte != 0 || truncBit != 0)
	{
		Buffer		mapBuffer;
		Page		page;
		char	   *map;

		newnblocks = truncBlock + 1;

		mapBuffer = vm_readbuf(rel, truncBlock, false);
		if (!BufferIsValid(mapBuffer))
		{
			/* nothing to do, the file was already smaller */
			return;
		}

		page = BufferGetPage(mapBuffer);
		map = PageGetContents(page);

		LockBuffer(mapBuffer, BUFFER_LOCK_EXCLUSIVE);

		/* Clear out the unwanted bytes. */
		MemSet(&map[truncByte + 1], 0, MAPSIZE - (truncByte + 1));

		/*
		 * Mask out the unwanted bits of the last remaining byte:
		 *
		 * ((1 << 0) - 1) = 00000000
		 * ((1 << 1) - 1) = 00000001
		 * ...
		 * ((1 << 6) - 1) = 00111111
		 * ((1 << 7) - 1) = 01111111
		 */
		map[truncByte] &= (1 << truncBit) - 1;

		MarkBufferDirty(mapBuffer);
		UnlockReleaseBuffer(mapBuffer);
	}
	else
		newnblocks = truncBlock;

	if (smgrnblocks(rel->rd_smgr, VISIBILITYMAP_FORKNUM) <= newnblocks)
	{
		/* nothing to do, the file was already smaller than requested size */
		return;
	}

	/* Truncate the unused VM pages, and send smgr inval message */
	smgrtruncate(rel->rd_smgr, VISIBILITYMAP_FORKNUM, newnblocks,
				 rel->rd_istemp);

	/*
	 * We might as well update the local smgr_vm_nblocks setting.
	 * smgrtruncate sent an smgr cache inval message, which will cause
	 * other backends to invalidate their copy of smgr_vm_nblocks, and
	 * this one too at the next command boundary. But this ensures it
	 * isn't outright wrong until then.
	 */
	if (rel->rd_smgr)
		rel->rd_smgr->smgr_vm_nblocks = newnblocks;
}

/*
 * Read a visibility map page.
 *
 * If the page doesn't exist, InvalidBuffer is returned, or if 'extend' is
 * true, the visibility map file is extended.
 */
static Buffer
vm_readbuf(Relation rel, BlockNumber blkno, bool extend)
{
	Buffer		buf;

	RelationOpenSmgr(rel);

	/*
	 * If we haven't cached the size of the visibility map fork yet, check it
	 * first. Also recheck if the requested block seems to be past end, since
	 * our cached value might be stale. (We send smgr inval messages on
	 * truncation, but not on extension.)
	 */
	if (rel->rd_smgr->smgr_vm_nblocks == InvalidBlockNumber ||
		blkno >= rel->rd_smgr->smgr_vm_nblocks)
	{
		if (smgrexists(rel->rd_smgr, VISIBILITYMAP_FORKNUM))
			rel->rd_smgr->smgr_vm_nblocks = smgrnblocks(rel->rd_smgr,
													  VISIBILITYMAP_FORKNUM);
		else
			rel->rd_smgr->smgr_vm_nblocks = 0;
	}

	/* Handle requests beyond EOF */
	if (blkno >= rel->rd_smgr->smgr_vm_nblocks)
	{
		if (extend)
			vm_extend(rel, blkno + 1);
		else
			return InvalidBuffer;
	}

	/*
	 * Use ZERO_ON_ERROR mode, and initialize the page if necessary. It's
	 * always safe to clear bits, so it's better to clear corrupt pages than
	 * error out.
	 */
	buf = ReadBufferExtended(rel, VISIBILITYMAP_FORKNUM, blkno,
							 RBM_ZERO_ON_ERROR, NULL);
	if (PageIsNew(BufferGetPage(buf)))
		PageInit(BufferGetPage(buf), BLCKSZ, 0);
	return buf;
}

/*
 * Ensure that the visibility map fork is at least vm_nblocks long, extending
 * it if necessary with zeroed pages.
 */
static void
vm_extend(Relation rel, BlockNumber vm_nblocks)
{
	BlockNumber vm_nblocks_now;
	Page		pg;

	pg = (Page) palloc(BLCKSZ);
	PageInit(pg, BLCKSZ, 0);

	/*
	 * We use the relation extension lock to lock out other backends trying
	 * to extend the visibility map at the same time. It also locks out
	 * extension of the main fork, unnecessarily, but extending the
	 * visibility map happens seldom enough that it doesn't seem worthwhile
	 * to have a separate lock tag type for it.
	 *
	 * Note that another backend might have extended or created the relation
	 * by the time we get the lock.
	 */
	LockRelationForExtension(rel, ExclusiveLock);

	/* Might have to re-open if a cache flush happened */
	RelationOpenSmgr(rel);

	/*
	 * Create the file first if it doesn't exist. If smgr_vm_nblocks is
	 * positive then it must exist, no need for an smgrexists call.
	 */
	if ((rel->rd_smgr->smgr_vm_nblocks == 0 ||
		 rel->rd_smgr->smgr_vm_nblocks == InvalidBlockNumber) &&
		!smgrexists(rel->rd_smgr, VISIBILITYMAP_FORKNUM))
		smgrcreate(rel->rd_smgr, VISIBILITYMAP_FORKNUM, false);

	vm_nblocks_now = smgrnblocks(rel->rd_smgr, VISIBILITYMAP_FORKNUM);

	while (vm_nblocks_now < vm_nblocks)
	{
		smgrextend(rel->rd_smgr, VISIBILITYMAP_FORKNUM, vm_nblocks_now,
				   (char *) pg, rel->rd_istemp);
		vm_nblocks_now++;
	}

	/* Update local cache with the up-to-date size */
	rel->rd_smgr->smgr_vm_nblocks = vm_nblocks_now;

	UnlockRelationForExtension(rel, ExclusiveLock);

	pfree(pg);
}