1 /*-------------------------------------------------------------------------
4 * bitmap for tracking visibility of heap tuples
6 * Portions Copyright (c) 1996-2009, PostgreSQL Global Development Group
7 * Portions Copyright (c) 1994, Regents of the University of California
11 * $PostgreSQL: pgsql/src/backend/access/heap/visibilitymap.c,v 1.4 2009/06/11 14:48:54 momjian Exp $
14 * visibilitymap_clear - clear a bit in the visibility map
15 * visibilitymap_pin - pin a map page for setting a bit
16 * visibilitymap_set - set a bit in a previously pinned page
17 * visibilitymap_test - test if a bit is set
21 * The visibility map is a bitmap with one bit per heap page. A set bit means
22 * that all tuples on the page are visible to all transactions, and doesn't
23 * therefore need to be vacuumed. The map is conservative in the sense that we
24 * make sure that whenever a bit is set, we know the condition is true, but if
25 * a bit is not set, it might or might not be.
27 * There's no explicit WAL logging in the functions in this file. The callers
28 * must make sure that whenever a bit is cleared, the bit is cleared on WAL
29 * replay of the updating operation as well. Setting bits during recovery
30 * isn't necessary for correctness.
32 * Currently, the visibility map is only used as a hint, to speed up VACUUM.
33 * A corrupted visibility map won't cause data corruption, although it can
34 * make VACUUM skip pages that need vacuuming, until the next anti-wraparound
35 * vacuum. The visibility map is not used for anti-wraparound vacuums, because
36 * an anti-wraparound vacuum needs to freeze tuples and observe the latest xid
37 * present in the table, also on pages that don't have any dead tuples.
39 * Although the visibility map is just a hint at the moment, the PD_ALL_VISIBLE
40 * flag on heap pages *must* be correct.
44 * In heapam.c, whenever a page is modified so that not all tuples on the
45 * page are visible to everyone anymore, the corresponding bit in the
46 * visibility map is cleared. The bit in the visibility map is cleared
47 * after releasing the lock on the heap page, to avoid holding the lock
48 * over possible I/O to read in the visibility map page.
50 * To set a bit, you need to hold a lock on the heap page. That prevents
51 * the race condition where VACUUM sees that all tuples on the page are
52 * visible to everyone, but another backend modifies the page before VACUUM
53 * sets the bit in the visibility map.
55 * When a bit is set, the LSN of the visibility map page is updated to make
56 * sure that the visibility map update doesn't get written to disk before the
57 * WAL record of the changes that made it possible to set the bit is flushed.
58 * But when a bit is cleared, we don't have to do that because it's always OK
59 * to clear a bit in the map from correctness point of view.
 * It would be nice to use the visibility map to skip visibility checks in
66 * Currently, the visibility map is not 100% correct all the time.
67 * During updates, the bit in the visibility map is cleared after releasing
 * the lock on the heap page. During the window between releasing the lock
69 * and clearing the bit in the visibility map, the bit in the visibility map
70 * is set, but the new insertion or deletion is not yet visible to other
73 * That might actually be OK for the index scans, though. The newly inserted
74 * tuple wouldn't have an index pointer yet, so all tuples reachable from an
75 * index would still be visible to all other backends, and deletions wouldn't
76 * be visible to other backends yet.
78 * There's another hole in the way the PD_ALL_VISIBLE flag is set. When
79 * vacuum observes that all tuples are visible to all, it sets the flag on
80 * the heap page, and also sets the bit in the visibility map. If we then
81 * crash, and only the visibility map page was flushed to disk, we'll have
82 * a bit set in the visibility map, but the corresponding flag on the heap
83 * page is not set. If the heap page is then updated, the updater won't
84 * know to clear the bit in the visibility map.
86 *-------------------------------------------------------------------------
90 #include "access/visibilitymap.h"
91 #include "storage/bufmgr.h"
92 #include "storage/bufpage.h"
93 #include "storage/lmgr.h"
94 #include "storage/smgr.h"
95 #include "utils/inval.h"
97 /*#define TRACE_VISIBILITYMAP */
 * Size of the bitmap on each visibility map page, in bytes. There's no
 * extra headers, so the whole page except for the standard page header
 * is used for the bitmap.
104 #define MAPSIZE (BLCKSZ - MAXALIGN(SizeOfPageHeaderData))
106 /* Number of bits allocated for each heap block. */
107 #define BITS_PER_HEAPBLOCK 1
109 /* Number of heap blocks we can represent in one byte. */
110 #define HEAPBLOCKS_PER_BYTE 8
112 /* Number of heap blocks we can represent in one visibility map page. */
113 #define HEAPBLOCKS_PER_PAGE (MAPSIZE * HEAPBLOCKS_PER_BYTE)
115 /* Mapping from heap block number to the right bit in the visibility map */
116 #define HEAPBLK_TO_MAPBLOCK(x) ((x) / HEAPBLOCKS_PER_PAGE)
117 #define HEAPBLK_TO_MAPBYTE(x) (((x) % HEAPBLOCKS_PER_PAGE) / HEAPBLOCKS_PER_BYTE)
118 #define HEAPBLK_TO_MAPBIT(x) ((x) % HEAPBLOCKS_PER_BYTE)
120 /* prototypes for internal routines */
121 static Buffer vm_readbuf(Relation rel, BlockNumber blkno, bool extend);
122 static void vm_extend(Relation rel, BlockNumber nvmblocks);
126 * visibilitymap_clear - clear a bit in visibility map
128 * Clear a bit in the visibility map, marking that not all tuples are
129 * visible to all transactions anymore.
132 visibilitymap_clear(Relation rel, BlockNumber heapBlk)
134 BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
135 int mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
136 int mapBit = HEAPBLK_TO_MAPBIT(heapBlk);
137 uint8 mask = 1 << mapBit;
141 #ifdef TRACE_VISIBILITYMAP
142 elog(DEBUG1, "vm_clear %s %d", RelationGetRelationName(rel), heapBlk);
145 mapBuffer = vm_readbuf(rel, mapBlock, false);
146 if (!BufferIsValid(mapBuffer))
147 return; /* nothing to do */
149 LockBuffer(mapBuffer, BUFFER_LOCK_EXCLUSIVE);
150 map = PageGetContents(BufferGetPage(mapBuffer));
152 if (map[mapByte] & mask)
154 map[mapByte] &= ~mask;
156 MarkBufferDirty(mapBuffer);
159 UnlockReleaseBuffer(mapBuffer);
163 * visibilitymap_pin - pin a map page for setting a bit
165 * Setting a bit in the visibility map is a two-phase operation. First, call
166 * visibilitymap_pin, to pin the visibility map page containing the bit for
167 * the heap page. Because that can require I/O to read the map page, you
168 * shouldn't hold a lock on the heap page while doing that. Then, call
169 * visibilitymap_set to actually set the bit.
171 * On entry, *buf should be InvalidBuffer or a valid buffer returned by
172 * an earlier call to visibilitymap_pin or visibilitymap_test on the same
173 * relation. On return, *buf is a valid buffer with the map page containing
174 * the the bit for heapBlk.
176 * If the page doesn't exist in the map file yet, it is extended.
179 visibilitymap_pin(Relation rel, BlockNumber heapBlk, Buffer *buf)
181 BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
183 /* Reuse the old pinned buffer if possible */
184 if (BufferIsValid(*buf))
186 if (BufferGetBlockNumber(*buf) == mapBlock)
191 *buf = vm_readbuf(rel, mapBlock, true);
195 * visibilitymap_set - set a bit on a previously pinned page
197 * recptr is the LSN of the heap page. The LSN of the visibility map page is
198 * advanced to that, to make sure that the visibility map doesn't get flushed
199 * to disk before the update to the heap page that made all tuples visible.
201 * This is an opportunistic function. It does nothing, unless *buf
202 * contains the bit for heapBlk. Call visibilitymap_pin first to pin
203 * the right map page. This function doesn't do any I/O.
206 visibilitymap_set(Relation rel, BlockNumber heapBlk, XLogRecPtr recptr,
209 BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
210 uint32 mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
211 uint8 mapBit = HEAPBLK_TO_MAPBIT(heapBlk);
215 #ifdef TRACE_VISIBILITYMAP
216 elog(DEBUG1, "vm_set %s %d", RelationGetRelationName(rel), heapBlk);
219 /* Check that we have the right page pinned */
220 if (!BufferIsValid(*buf) || BufferGetBlockNumber(*buf) != mapBlock)
223 page = BufferGetPage(*buf);
224 map = PageGetContents(page);
225 LockBuffer(*buf, BUFFER_LOCK_EXCLUSIVE);
227 if (!(map[mapByte] & (1 << mapBit)))
229 map[mapByte] |= (1 << mapBit);
231 if (XLByteLT(PageGetLSN(page), recptr))
232 PageSetLSN(page, recptr);
233 PageSetTLI(page, ThisTimeLineID);
234 MarkBufferDirty(*buf);
237 LockBuffer(*buf, BUFFER_LOCK_UNLOCK);
241 * visibilitymap_test - test if a bit is set
243 * Are all tuples on heapBlk visible to all, according to the visibility map?
245 * On entry, *buf should be InvalidBuffer or a valid buffer returned by an
246 * earlier call to visibilitymap_pin or visibilitymap_test on the same
247 * relation. On return, *buf is a valid buffer with the map page containing
248 * the the bit for heapBlk, or InvalidBuffer. The caller is responsible for
249 * releasing *buf after it's done testing and setting bits.
252 visibilitymap_test(Relation rel, BlockNumber heapBlk, Buffer *buf)
254 BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
255 uint32 mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
256 uint8 mapBit = HEAPBLK_TO_MAPBIT(heapBlk);
260 #ifdef TRACE_VISIBILITYMAP
261 elog(DEBUG1, "vm_test %s %d", RelationGetRelationName(rel), heapBlk);
264 /* Reuse the old pinned buffer if possible */
265 if (BufferIsValid(*buf))
267 if (BufferGetBlockNumber(*buf) != mapBlock)
270 *buf = InvalidBuffer;
274 if (!BufferIsValid(*buf))
276 *buf = vm_readbuf(rel, mapBlock, false);
277 if (!BufferIsValid(*buf))
281 map = PageGetContents(BufferGetPage(*buf));
284 * We don't need to lock the page, as we're only looking at a single bit.
286 result = (map[mapByte] & (1 << mapBit)) ? true : false;
292 * visibilitymap_test - truncate the visibility map
295 visibilitymap_truncate(Relation rel, BlockNumber nheapblocks)
297 BlockNumber newnblocks;
299 /* last remaining block, byte, and bit */
300 BlockNumber truncBlock = HEAPBLK_TO_MAPBLOCK(nheapblocks);
301 uint32 truncByte = HEAPBLK_TO_MAPBYTE(nheapblocks);
302 uint8 truncBit = HEAPBLK_TO_MAPBIT(nheapblocks);
304 #ifdef TRACE_VISIBILITYMAP
305 elog(DEBUG1, "vm_truncate %s %d", RelationGetRelationName(rel), nheapblocks);
309 * If no visibility map has been created yet for this relation, there's
310 * nothing to truncate.
312 if (!smgrexists(rel->rd_smgr, VISIBILITYMAP_FORKNUM))
316 * Unless the new size is exactly at a visibility map page boundary, the
317 * tail bits in the last remaining map page, representing truncated heap
318 * blocks, need to be cleared. This is not only tidy, but also necessary
319 * because we don't get a chance to clear the bits if the heap is extended
322 if (truncByte != 0 || truncBit != 0)
328 newnblocks = truncBlock + 1;
330 mapBuffer = vm_readbuf(rel, truncBlock, false);
331 if (!BufferIsValid(mapBuffer))
333 /* nothing to do, the file was already smaller */
337 page = BufferGetPage(mapBuffer);
338 map = PageGetContents(page);
340 LockBuffer(mapBuffer, BUFFER_LOCK_EXCLUSIVE);
342 /* Clear out the unwanted bytes. */
343 MemSet(&map[truncByte + 1], 0, MAPSIZE - (truncByte + 1));
346 * Mask out the unwanted bits of the last remaining byte.
348 * ((1 << 0) - 1) = 00000000 ((1 << 1) - 1) = 00000001 ... ((1 << 6) -
349 * 1) = 00111111 ((1 << 7) - 1) = 01111111
351 map[truncByte] &= (1 << truncBit) - 1;
353 MarkBufferDirty(mapBuffer);
354 UnlockReleaseBuffer(mapBuffer);
357 newnblocks = truncBlock;
359 if (smgrnblocks(rel->rd_smgr, VISIBILITYMAP_FORKNUM) < newnblocks)
361 /* nothing to do, the file was already smaller than requested size */
365 smgrtruncate(rel->rd_smgr, VISIBILITYMAP_FORKNUM, newnblocks,
369 * Need to invalidate the relcache entry, because rd_vm_nblocks seen by
370 * other backends is no longer valid.
373 CacheInvalidateRelcache(rel);
375 rel->rd_vm_nblocks = newnblocks;
379 * Read a visibility map page.
381 * If the page doesn't exist, InvalidBuffer is returned, or if 'extend' is
382 * true, the visibility map file is extended.
385 vm_readbuf(Relation rel, BlockNumber blkno, bool extend)
389 RelationOpenSmgr(rel);
392 * The current size of the visibility map fork is kept in relcache, to
393 * avoid reading beyond EOF. If we haven't cached the size of the map yet,
396 if (rel->rd_vm_nblocks == InvalidBlockNumber)
398 if (smgrexists(rel->rd_smgr, VISIBILITYMAP_FORKNUM))
399 rel->rd_vm_nblocks = smgrnblocks(rel->rd_smgr,
400 VISIBILITYMAP_FORKNUM);
402 rel->rd_vm_nblocks = 0;
405 /* Handle requests beyond EOF */
406 if (blkno >= rel->rd_vm_nblocks)
409 vm_extend(rel, blkno + 1);
411 return InvalidBuffer;
415 * Use ZERO_ON_ERROR mode, and initialize the page if necessary. It's
416 * always safe to clear bits, so it's better to clear corrupt pages than
419 buf = ReadBufferExtended(rel, VISIBILITYMAP_FORKNUM, blkno,
420 RBM_ZERO_ON_ERROR, NULL);
421 if (PageIsNew(BufferGetPage(buf)))
422 PageInit(BufferGetPage(buf), BLCKSZ, 0);
427 * Ensure that the visibility map fork is at least vm_nblocks long, extending
428 * it if necessary with zeroed pages.
431 vm_extend(Relation rel, BlockNumber vm_nblocks)
433 BlockNumber vm_nblocks_now;
436 pg = (Page) palloc(BLCKSZ);
437 PageInit(pg, BLCKSZ, 0);
440 * We use the relation extension lock to lock out other backends trying to
441 * extend the visibility map at the same time. It also locks out extension
442 * of the main fork, unnecessarily, but extending the visibility map
443 * happens seldom enough that it doesn't seem worthwhile to have a
444 * separate lock tag type for it.
446 * Note that another backend might have extended or created the relation
447 * before we get the lock.
449 LockRelationForExtension(rel, ExclusiveLock);
451 /* Create the file first if it doesn't exist */
452 if ((rel->rd_vm_nblocks == 0 || rel->rd_vm_nblocks == InvalidBlockNumber)
453 && !smgrexists(rel->rd_smgr, VISIBILITYMAP_FORKNUM))
455 smgrcreate(rel->rd_smgr, VISIBILITYMAP_FORKNUM, false);
459 vm_nblocks_now = smgrnblocks(rel->rd_smgr, VISIBILITYMAP_FORKNUM);
461 while (vm_nblocks_now < vm_nblocks)
463 smgrextend(rel->rd_smgr, VISIBILITYMAP_FORKNUM, vm_nblocks_now,
464 (char *) pg, rel->rd_istemp);
468 UnlockRelationForExtension(rel, ExclusiveLock);
472 /* Update the relcache with the up-to-date size */
474 CacheInvalidateRelcache(rel);
475 rel->rd_vm_nblocks = vm_nblocks_now;