1 /*-------------------------------------------------------------------------
4 * BTree-specific page management code for the Postgres btree access
7 * Portions Copyright (c) 1996-2003, PostgreSQL Global Development Group
8 * Portions Copyright (c) 1994, Regents of the University of California
12 * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtpage.c,v 1.75 2004/04/21 18:24:25 tgl Exp $
15 * Postgres btree pages look like ordinary relation pages. The opaque
16 * data at high addresses includes pointers to left and right siblings
17 * and flag data describing page state. The first page in a btree, page
18 * zero, is special -- it stores meta-information describing the tree.
19 * Pages one and higher store the actual tree data.
21 *-------------------------------------------------------------------------
25 #include "access/nbtree.h"
26 #include "miscadmin.h"
27 #include "storage/freespace.h"
28 #include "storage/lmgr.h"
32 * _bt_metapinit() -- Initialize the metadata page of a new btree.
34 * If markvalid is true, the index is immediately marked valid, else it
35 * will be invalid until _bt_metaproot() is called.
37 * Note: there's no real need for any locking here. Since the transaction
38 * creating the index hasn't committed yet, no one else can even see the index
39 * much less be trying to use it. (In a REINDEX-in-place scenario, that's
40 * not true, but we assume the caller holds sufficient locks on the index.)
43 _bt_metapinit(Relation rel, bool markvalid)
/* NOTE(review): this listing elides lines (gaps in embedded numbering); code kept verbatim. */
47 BTMetaPageData *metad;
/* Refuse to initialize if the relation already contains any blocks. */
50 if (RelationGetNumberOfBlocks(rel) != 0)
51 elog(ERROR, "cannot initialize non-empty btree index \"%s\"",
52 RelationGetRelationName(rel));
/* Allocate the first page; it must come out as block BTREE_METAPAGE. */
54 buf = ReadBuffer(rel, P_NEW);
55 Assert(BufferGetBlockNumber(buf) == BTREE_METAPAGE);
56 pg = BufferGetPage(buf);
58 /* NO ELOG(ERROR) from here till newmeta op is logged */
61 _bt_pageinit(pg, BufferGetPageSize(buf));
/* btm_magic = 0 leaves the index invalid until _bt_metaproot() validates it. */
63 metad = BTPageGetMeta(pg);
64 metad->btm_magic = markvalid ? BTREE_MAGIC : 0;
65 metad->btm_version = BTREE_VERSION;
66 metad->btm_root = P_NONE;
68 metad->btm_fastroot = P_NONE;
69 metad->btm_fastlevel = 0;
71 op = (BTPageOpaque) PageGetSpecialPointer(pg);
72 op->btpo_flags = BTP_META;
/* WAL-log the new metapage; record type depends on markvalid. */
77 xl_btree_newmeta xlrec;
81 xlrec.node = rel->rd_node;
82 xlrec.meta.root = metad->btm_root;
83 xlrec.meta.level = metad->btm_level;
84 xlrec.meta.fastroot = metad->btm_fastroot;
85 xlrec.meta.fastlevel = metad->btm_fastlevel;
87 rdata[0].buffer = InvalidBuffer;
88 rdata[0].data = (char *) &xlrec;
89 rdata[0].len = SizeOfBtreeNewmeta;
92 recptr = XLogInsert(RM_BTREE_ID,
93 markvalid ? XLOG_BTREE_NEWMETA : XLOG_BTREE_INVALIDMETA,
/* Stamp the page with the WAL position so recovery knows it's up to date. */
96 PageSetLSN(pg, recptr);
97 PageSetSUI(pg, ThisStartUpID);
106 * _bt_getroot() -- Get the root page of the btree.
108 * Since the root page can move around the btree file, we have to read
109 * its location from the metadata page, and then read the root page
110 * itself. If no root page exists yet, we have to create one. The
111 * standard class of race conditions exists here; I think I covered
112 * them all in the Hopi Indian rain dance of lock requests below.
114 * The access type parameter (BT_READ or BT_WRITE) controls whether
115 * a new root page will be created or not. If access = BT_READ,
116 * and no root page exists, we just return InvalidBuffer. For
117 * BT_WRITE, we try to create the root page if it doesn't exist.
118 * NOTE that the returned root page will have only a read lock set
119 * on it even if access = BT_WRITE!
121 * The returned page is not necessarily the true root --- it could be
122 * a "fast root" (a page that is alone in its level due to deletions).
123 * Also, if the root page is split while we are "in flight" to it,
124 * what we will return is the old root, which is now just the leftmost
125 * page on a probably-not-very-wide level. For most purposes this is
126 * as good as or better than the true root, so we do not bother to
127 * insist on finding the true root. We do, however, guarantee to
128 * return a live (not deleted or half-dead) page.
130 * On successful return, the root page is pinned and read-locked.
131 * The metadata page is not locked or pinned on exit.
134 _bt_getroot(Relation rel, int access)
/* NOTE(review): listing elides lines (gaps in embedded numbering); code kept verbatim. */
138 BTPageOpaque metaopaque;
141 BTPageOpaque rootopaque;
142 BlockNumber rootblkno;
144 BTMetaPageData *metad;
/* Read-lock the metapage to learn where the (fast) root currently is. */
146 metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_READ);
147 metapg = BufferGetPage(metabuf);
148 metaopaque = (BTPageOpaque) PageGetSpecialPointer(metapg);
149 metad = BTPageGetMeta(metapg);
151 /* sanity-check the metapage */
152 if (!(metaopaque->btpo_flags & BTP_META) ||
153 metad->btm_magic != BTREE_MAGIC)
155 (errcode(ERRCODE_INDEX_CORRUPTED),
156 errmsg("index \"%s\" is not a btree",
157 RelationGetRelationName(rel))));
159 if (metad->btm_version != BTREE_VERSION)
161 (errcode(ERRCODE_INDEX_CORRUPTED),
162 errmsg("version mismatch in index \"%s\": file version %d, code version %d",
163 RelationGetRelationName(rel),
164 metad->btm_version, BTREE_VERSION)));
166 /* if no root page initialized yet, do it */
167 if (metad->btm_root == P_NONE)
169 /* If access = BT_READ, caller doesn't want us to create root yet */
170 if (access == BT_READ)
172 _bt_relbuf(rel, metabuf);
173 return InvalidBuffer;
176 /* trade in our read lock for a write lock */
177 LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
178 LockBuffer(metabuf, BT_WRITE);
181 * Race condition: if someone else initialized the metadata
182 * between the time we released the read lock and acquired the
183 * write lock, we must avoid doing it again.
185 if (metad->btm_root != P_NONE)
188 * Metadata initialized by someone else. In order to
189 * guarantee no deadlocks, we have to release the metadata
190 * page and start all over again. (Is that really true? But
191 * it's hardly worth trying to optimize this case.)
193 _bt_relbuf(rel, metabuf);
/* Recurse: retry from scratch now that a root exists. */
194 return _bt_getroot(rel, access);
198 * Get, initialize, write, and leave a lock of the appropriate
199 * type on the new root page. Since this is the first page in the
200 * tree, it's a leaf as well as the root.
202 rootbuf = _bt_getbuf(rel, P_NEW, BT_WRITE);
203 rootblkno = BufferGetBlockNumber(rootbuf);
204 rootpage = BufferGetPage(rootbuf);
206 _bt_pageinit(rootpage, BufferGetPageSize(rootbuf));
207 rootopaque = (BTPageOpaque) PageGetSpecialPointer(rootpage);
208 rootopaque->btpo_prev = rootopaque->btpo_next = P_NONE;
209 rootopaque->btpo_flags = (BTP_LEAF | BTP_ROOT);
210 rootopaque->btpo.level = 0;
212 /* NO ELOG(ERROR) till meta is updated */
213 START_CRIT_SECTION();
/* Point both the true root and fast root at the brand-new page. */
215 metad->btm_root = rootblkno;
216 metad->btm_level = 0;
217 metad->btm_fastroot = rootblkno;
218 metad->btm_fastlevel = 0;
223 xl_btree_newroot xlrec;
227 xlrec.node = rel->rd_node;
228 xlrec.rootblk = rootblkno;
231 rdata.buffer = InvalidBuffer;
232 rdata.data = (char *) &xlrec;
233 rdata.len = SizeOfBtreeNewroot;
236 recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_NEWROOT, &rdata);
/* One WAL record covers both pages, so both get the same LSN. */
238 PageSetLSN(rootpage, recptr);
239 PageSetSUI(rootpage, ThisStartUpID);
240 PageSetLSN(metapg, recptr);
241 PageSetSUI(metapg, ThisStartUpID);
246 _bt_wrtnorelbuf(rel, rootbuf);
249 * swap root write lock for read lock. There is no danger of
250 * anyone else accessing the new root page while it's unlocked,
251 * since no one else knows where it is yet.
253 LockBuffer(rootbuf, BUFFER_LOCK_UNLOCK);
254 LockBuffer(rootbuf, BT_READ);
256 /* okay, metadata is correct, write and release it */
257 _bt_wrtbuf(rel, metabuf);
/* Normal path: follow the fast root recorded in the metapage. */
261 rootblkno = metad->btm_fastroot;
262 Assert(rootblkno != P_NONE);
263 rootlevel = metad->btm_fastlevel;
266 * We are done with the metapage; arrange to release it via
267 * first _bt_relandgetbuf call
273 rootbuf = _bt_relandgetbuf(rel, rootbuf, rootblkno, BT_READ);
274 rootpage = BufferGetPage(rootbuf);
275 rootopaque = (BTPageOpaque) PageGetSpecialPointer(rootpage);
/* Skip over deleted/half-dead pages by moving right along the level. */
277 if (!P_IGNORE(rootopaque))
280 /* it's dead, Jim. step right one page */
281 if (P_RIGHTMOST(rootopaque))
282 elog(ERROR, "no live root page found in \"%s\"",
283 RelationGetRelationName(rel));
284 rootblkno = rootopaque->btpo_next;
287 /* Note: can't check btpo.level on deleted pages */
288 if (rootopaque->btpo.level != rootlevel)
289 elog(ERROR, "root page %u of \"%s\" has level %u, expected %u",
290 rootblkno, RelationGetRelationName(rel),
291 rootopaque->btpo.level, rootlevel);
295 * By here, we have a pin and read lock on the root page, and no lock
296 * set on the metadata page. Return the root page's buffer.
302 * _bt_gettrueroot() -- Get the true root page of the btree.
304 * This is the same as the BT_READ case of _bt_getroot(), except
305 * we follow the true-root link not the fast-root link.
307 * By the time we acquire lock on the root page, it might have been split and
308 * not be the true root anymore. This is okay for the present uses of this
309 * routine; we only really need to be able to move up at least one tree level
310 * from whatever non-root page we were at. If we ever do need to lock the
311 * one true root page, we could loop here, re-reading the metapage on each
312 * failure. (Note that it wouldn't do to hold the lock on the metapage while
313 * moving to the root --- that'd deadlock against any concurrent root split.)
316 _bt_gettrueroot(Relation rel)
/* NOTE(review): listing elides lines; code kept verbatim. */
320 BTPageOpaque metaopaque;
323 BTPageOpaque rootopaque;
324 BlockNumber rootblkno;
326 BTMetaPageData *metad;
/* Read-lock the metapage and validate it before trusting btm_root. */
328 metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_READ);
329 metapg = BufferGetPage(metabuf);
330 metaopaque = (BTPageOpaque) PageGetSpecialPointer(metapg);
331 metad = BTPageGetMeta(metapg);
333 if (!(metaopaque->btpo_flags & BTP_META) ||
334 metad->btm_magic != BTREE_MAGIC)
336 (errcode(ERRCODE_INDEX_CORRUPTED),
337 errmsg("index \"%s\" is not a btree",
338 RelationGetRelationName(rel))));
340 if (metad->btm_version != BTREE_VERSION)
342 (errcode(ERRCODE_INDEX_CORRUPTED),
343 errmsg("version mismatch in index \"%s\": file version %d, code version %d",
344 RelationGetRelationName(rel),
345 metad->btm_version, BTREE_VERSION)));
347 /* if no root page initialized yet, fail */
348 if (metad->btm_root == P_NONE)
350 _bt_relbuf(rel, metabuf);
351 return InvalidBuffer;
/* Unlike _bt_getroot, follow the true-root link, not the fast root. */
354 rootblkno = metad->btm_root;
355 rootlevel = metad->btm_level;
358 * We are done with the metapage; arrange to release it via
359 * first _bt_relandgetbuf call
365 rootbuf = _bt_relandgetbuf(rel, rootbuf, rootblkno, BT_READ);
366 rootpage = BufferGetPage(rootbuf);
367 rootopaque = (BTPageOpaque) PageGetSpecialPointer(rootpage);
/* If the page is dead/half-dead, step right until a live page is found. */
369 if (!P_IGNORE(rootopaque))
372 /* it's dead, Jim. step right one page */
373 if (P_RIGHTMOST(rootopaque))
374 elog(ERROR, "no live root page found in \"%s\"",
375 RelationGetRelationName(rel));
376 rootblkno = rootopaque->btpo_next;
379 /* Note: can't check btpo.level on deleted pages */
380 if (rootopaque->btpo.level != rootlevel)
381 elog(ERROR, "root page %u of \"%s\" has level %u, expected %u",
382 rootblkno, RelationGetRelationName(rel),
383 rootopaque->btpo.level, rootlevel);
389 * _bt_getbuf() -- Get a buffer by block number for read or write.
391 * blkno == P_NEW means to get an unallocated index page.
393 * When this routine returns, the appropriate lock is set on the
394 * requested buffer and its reference count has been incremented
395 * (ie, the buffer is "locked and pinned").
398 _bt_getbuf(Relation rel, BlockNumber blkno, int access)
/* NOTE(review): listing elides lines (branch structure partially invisible); code kept verbatim. */
404 /* Read an existing block of the relation */
405 buf = ReadBuffer(rel, blkno);
406 LockBuffer(buf, access);
/* P_NEW path: allocating a fresh page requires a write lock. */
413 Assert(access == BT_WRITE);
416 * First see if the FSM knows of any free pages.
418 * We can't trust the FSM's report unreservedly; we have to check
419 * that the page is still free. (For example, an already-free
420 * page could have been re-used between the time the last VACUUM
421 * scanned it and the time the VACUUM made its FSM updates.)
423 * In fact, it's worse than that: we can't even assume that it's
424 * safe to take a lock on the reported page. If somebody else
425 * has a lock on it, or even worse our own caller does, we could
426 * deadlock. (The own-caller scenario is actually not improbable.
427 * Consider an index on a serial or timestamp column. Nearly all
428 * splits will be at the rightmost page, so it's entirely likely
429 * that _bt_split will call us while holding a lock on the page most
430 * recently acquired from FSM. A VACUUM running concurrently with
431 * the previous split could well have placed that page back in FSM.)
433 * To get around that, we ask for only a conditional lock on the
434 * reported page. If we fail, then someone else is using the page,
435 * and we may reasonably assume it's not free. (If we happen to be
436 * wrong, the worst consequence is the page will be lost to use till
437 * the next VACUUM, which is no big problem.)
441 blkno = GetFreeIndexPage(&rel->rd_node);
442 if (blkno == InvalidBlockNumber)
444 buf = ReadBuffer(rel, blkno);
/* Conditional lock only -- see deadlock discussion above. */
445 if (ConditionalLockBuffer(buf))
447 page = BufferGetPage(buf);
448 if (_bt_page_recyclable(page))
450 /* Okay to use page. Re-initialize and return it */
451 _bt_pageinit(page, BufferGetPageSize(buf));
454 elog(DEBUG2, "FSM returned nonrecyclable page");
455 _bt_relbuf(rel, buf);
459 elog(DEBUG2, "FSM returned nonlockable page");
460 /* couldn't get lock, so just drop pin */
466 * Extend the relation by one page.
468 * We have to use a lock to ensure no one else is extending the rel
469 * at the same time, else we will both try to initialize the same
470 * new page. We can skip locking for new or temp relations,
471 * however, since no one else could be accessing them.
473 needLock = !(rel->rd_isnew || rel->rd_istemp);
476 LockPage(rel, 0, ExclusiveLock);
478 buf = ReadBuffer(rel, P_NEW);
481 * Release the file-extension lock; it's now OK for someone else
482 * to extend the relation some more.
485 UnlockPage(rel, 0, ExclusiveLock);
487 /* Acquire appropriate buffer lock on new page */
488 LockBuffer(buf, access);
490 /* Initialize the new page before returning it */
491 page = BufferGetPage(buf);
492 _bt_pageinit(page, BufferGetPageSize(buf));
495 /* ref count and lock type are correct */
500 * _bt_relandgetbuf() -- release a locked buffer and get another one.
502 * This is equivalent to _bt_relbuf followed by _bt_getbuf, with the
503 * exception that blkno may not be P_NEW. Also, if obuf is InvalidBuffer
504 * then it reduces to just _bt_getbuf; allowing this case simplifies some
505 * callers. The motivation for using this is to avoid two entries to the
506 * bufmgr when one will do.
509 _bt_relandgetbuf(Relation rel, Buffer obuf, BlockNumber blkno, int access)
/* One bufmgr call swaps the old pin for a new one; cheaper than relbuf+getbuf. */
513 Assert(blkno != P_NEW);
/* obuf may be InvalidBuffer, in which case there is no lock to drop. */
514 if (BufferIsValid(obuf))
515 LockBuffer(obuf, BUFFER_LOCK_UNLOCK);
516 buf = ReleaseAndReadBuffer(obuf, rel, blkno);
517 LockBuffer(buf, access);
522 * _bt_relbuf() -- release a locked buffer.
524 * Lock and pin (refcount) are both dropped. Note that either read or
525 * write lock can be dropped this way, but if we modified the buffer,
526 * this is NOT the right way to release a write lock.
529 _bt_relbuf(Relation rel, Buffer buf)
/* Drop the buffer lock; the pin release is elided from this listing. */
531 LockBuffer(buf, BUFFER_LOCK_UNLOCK);
536 * _bt_wrtbuf() -- write a btree page to disk.
538 * This routine releases the lock held on the buffer and our refcount
539 * for it. It is an error to call _bt_wrtbuf() without a write lock
540 * and a pin on the buffer.
542 * NOTE: actually, the buffer manager just marks the shared buffer page
543 * dirty here; the real I/O happens later. This is okay since we are not
544 * relying on write ordering anyway. The WAL mechanism is responsible for
545 * guaranteeing correctness after a crash.
548 _bt_wrtbuf(Relation rel, Buffer buf)
/* Drop the lock; the dirty-and-unpin step is elided from this listing. */
550 LockBuffer(buf, BUFFER_LOCK_UNLOCK);
555 * _bt_wrtnorelbuf() -- write a btree page to disk, but do not release
556 * our reference or lock.
558 * It is an error to call _bt_wrtnorelbuf() without a write lock
559 * and a pin on the buffer.
564 _bt_wrtnorelbuf(Relation rel, Buffer buf)
/* Mark dirty without giving up our pin or lock (caller keeps using the page). */
566 WriteNoReleaseBuffer(buf);
570 * _bt_pageinit() -- Initialize a new page.
572 * On return, the page header is initialized; data space is empty;
573 * special space is zeroed out.
576 _bt_pageinit(Page page, Size size)
/* Standard page init, reserving special space for the btree opaque data. */
578 PageInit(page, size, sizeof(BTPageOpaqueData));
582 * _bt_page_recyclable() -- Is an existing page recyclable?
584 * This exists to make sure _bt_getbuf and btvacuumcleanup have the same
585 * policy about whether a page is safe to re-use.
588 _bt_page_recyclable(Page page)
/* NOTE(review): the all-zeroes-page check mentioned below is elided from this listing. */
593 * It's possible to find an all-zeroes page in an index --- for
594 * example, a backend might successfully extend the relation one page
595 * and then crash before it is able to make a WAL entry for adding the
596 * page. If we find a zeroed page then reclaim it.
602 * Otherwise, recycle if deleted and too old to have any processes
605 opaque = (BTPageOpaque) PageGetSpecialPointer(page);
/* Deleted, and the deleting xact precedes RecentXmin => no scan can still need it. */
606 if (P_ISDELETED(opaque) &&
607 TransactionIdPrecedesOrEquals(opaque->btpo.xact, RecentXmin))
613 * _bt_metaproot() -- Change the root page of the btree.
615 * Lehman and Yao require that the root page move around in order to
616 * guarantee deadlock-free short-term, fine-granularity locking. When
617 * we split the root page, we record the new parent in the metadata page
618 * for the relation. This routine does the work.
620 * No direct preconditions, but if you don't have the write lock on
621 * at least the old root page when you call this, you're making a big
622 * mistake. On exit, metapage data is correct and we no longer have
623 * a pin or lock on the metapage.
625 * Actually this is not used for splitting on-the-fly anymore. It's only used
626 * in nbtsort.c at the completion of btree building, where we know we have
627 * sole access to the index anyway.
630 _bt_metaproot(Relation rel, BlockNumber rootbknum, uint32 level)
/* NOTE(review): listing elides lines (END_CRIT_SECTION etc.); code kept verbatim. */
634 BTPageOpaque metaopaque;
635 BTMetaPageData *metad;
/* Write-lock the metapage; we are about to overwrite the root pointers. */
637 metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_WRITE);
638 metap = BufferGetPage(metabuf);
639 metaopaque = (BTPageOpaque) PageGetSpecialPointer(metap);
640 Assert(metaopaque->btpo_flags & BTP_META);
642 /* NO ELOG(ERROR) from here till newmeta op is logged */
643 START_CRIT_SECTION();
645 metad = BTPageGetMeta(metap);
646 Assert(metad->btm_magic == BTREE_MAGIC || metad->btm_magic == 0);
647 metad->btm_magic = BTREE_MAGIC; /* it's valid now for sure */
/* New root serves as both true root and fast root. */
648 metad->btm_root = rootbknum;
649 metad->btm_level = level;
650 metad->btm_fastroot = rootbknum;
651 metad->btm_fastlevel = level;
/* WAL-log the whole new metadata contents. */
656 xl_btree_newmeta xlrec;
658 XLogRecData rdata[1];
660 xlrec.node = rel->rd_node;
661 xlrec.meta.root = metad->btm_root;
662 xlrec.meta.level = metad->btm_level;
663 xlrec.meta.fastroot = metad->btm_fastroot;
664 xlrec.meta.fastlevel = metad->btm_fastlevel;
666 rdata[0].buffer = InvalidBuffer;
667 rdata[0].data = (char *) &xlrec;
668 rdata[0].len = SizeOfBtreeNewmeta;
669 rdata[0].next = NULL;
671 recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_NEWMETA, rdata);
673 PageSetLSN(metap, recptr);
674 PageSetSUI(metap, ThisStartUpID);
/* Write out and release the metapage; caller holds no lock on it afterwards. */
679 _bt_wrtbuf(rel, metabuf);
683 * Delete item(s) from a btree page.
685 * This must only be used for deleting leaf items. Deleting an item on a
686 * non-leaf page has to be done as part of an atomic action that includes
687 * deleting the page it points to.
689 * This routine assumes that the caller has pinned and locked the buffer,
690 * and will write the buffer afterwards. Also, the given itemnos *must*
691 * appear in increasing order in the array.
694 _bt_delitems(Relation rel, Buffer buf,
695 OffsetNumber *itemnos, int nitems)
/* NOTE(review): listing elides lines (temp-rel test, END_CRIT_SECTION); code kept verbatim. */
697 Page page = BufferGetPage(buf);
700 /* No ereport(ERROR) until changes are logged */
701 START_CRIT_SECTION();
704 * Delete the items in reverse order so we don't have to think about
705 * adjusting item numbers for previous deletions.
707 for (i = nitems - 1; i >= 0; i--)
708 PageIndexTupleDelete(page, itemnos[i]);
/* WAL record: fixed header plus the offsets array attached to the buffer. */
713 xl_btree_delete xlrec;
715 XLogRecData rdata[2];
717 xlrec.node = rel->rd_node;
718 xlrec.block = BufferGetBlockNumber(buf);
720 rdata[0].buffer = InvalidBuffer;
721 rdata[0].data = (char *) &xlrec;
722 rdata[0].len = SizeOfBtreeDelete;
723 rdata[0].next = &(rdata[1]);
726 * The target-offsets array is not in the buffer, but pretend that
727 * it is. When XLogInsert stores the whole buffer, the offsets
728 * array need not be stored too.
730 rdata[1].buffer = buf;
733 rdata[1].data = (char *) itemnos;
734 rdata[1].len = nitems * sizeof(OffsetNumber);
738 rdata[1].data = NULL;
741 rdata[1].next = NULL;
743 recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_DELETE, rdata);
745 PageSetLSN(page, recptr);
746 PageSetSUI(page, ThisStartUpID);
753 * _bt_pagedel() -- Delete a page from the b-tree.
755 * This action unlinks the page from the b-tree structure, removing all
756 * pointers leading to it --- but not touching its own left and right links.
757 * The page cannot be physically reclaimed right away, since other processes
758 * may currently be trying to follow links leading to the page; they have to
759 * be allowed to use its right-link to recover. See nbtree/README.
761 * On entry, the target buffer must be pinned and read-locked. This lock and
762 * pin will be dropped before exiting.
764 * Returns the number of pages successfully deleted (zero on failure; could
765 * be more than one if parent blocks were deleted).
767 * NOTE: this leaks memory. Rather than trying to clean up everything
768 * carefully, it's better to run it in a temp context that can be reset
772 _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
/*
 * NOTE(review): this listing elides many lines (gaps in embedded numbering:
 * declarations, braces, returns, END_CRIT_SECTION). Code kept verbatim;
 * only comments added. Lock order throughout: left sibling, target, right
 * sibling, parent, metapage -- always right then up, to avoid deadlock.
 */
778 OffsetNumber poffset,
785 ScanKey itup_scankey;
790 bool parent_half_dead;
791 bool parent_one_child;
793 Buffer metabuf = InvalidBuffer;
795 BTMetaPageData *metad = NULL;
800 * We can never delete rightmost pages nor root pages. While at it,
801 * check that page is not already deleted and is empty.
803 page = BufferGetPage(buf);
804 opaque = (BTPageOpaque) PageGetSpecialPointer(page);
805 if (P_RIGHTMOST(opaque) || P_ISROOT(opaque) || P_ISDELETED(opaque) ||
806 P_FIRSTDATAKEY(opaque) <= PageGetMaxOffsetNumber(page))
808 _bt_relbuf(rel, buf);
813 * Save info about page, including a copy of its high key (it must
814 * have one, being non-rightmost).
816 target = BufferGetBlockNumber(buf);
817 targetlevel = opaque->btpo.level;
818 leftsib = opaque->btpo_prev;
819 itemid = PageGetItemId(page, P_HIKEY);
820 targetkey = CopyBTItem((BTItem) PageGetItem(page, itemid));
823 * We need to get an approximate pointer to the page's parent page.
824 * Use the standard search mechanism to search for the page's high
825 * key; this will give us a link to either the current parent or
826 * someplace to its left (if there are multiple equal high keys). To
827 * avoid deadlocks, we'd better drop the target page lock first.
829 _bt_relbuf(rel, buf);
830 /* we need a scan key to do our search, so build one */
831 itup_scankey = _bt_mkscankey(rel, &(targetkey->bti_itup));
832 /* find the leftmost leaf page containing this key */
833 stack = _bt_search(rel, rel->rd_rel->relnatts, itup_scankey, false,
835 /* don't need a pin on that either */
836 _bt_relbuf(rel, lbuf);
839 * If we are trying to delete an interior page, _bt_search did more
840 * than we needed. Locate the stack item pointing to our parent
847 elog(ERROR, "not enough stack items");
848 if (ilevel == targetlevel)
850 stack = stack->bts_parent;
855 * We have to lock the pages we need to modify in the standard order:
856 * moving right, then up. Else we will deadlock against other
859 * So, we need to find and write-lock the current left sibling of the
860 * target page. The sibling that was current a moment ago could have
861 * split, so we may have to move right. This search could fail if
862 * either the sibling or the target page was deleted by someone else
863 * meanwhile; if so, give up. (Right now, that should never happen,
864 * since page deletion is only done in VACUUM and there shouldn't be
865 * multiple VACUUMs concurrently on the same table.)
867 if (leftsib != P_NONE)
869 lbuf = _bt_getbuf(rel, leftsib, BT_WRITE);
870 page = BufferGetPage(lbuf);
871 opaque = (BTPageOpaque) PageGetSpecialPointer(page);
/* Walk right until we find the page whose right-link points at target. */
872 while (P_ISDELETED(opaque) || opaque->btpo_next != target)
874 /* step right one page */
875 leftsib = opaque->btpo_next;
876 _bt_relbuf(rel, lbuf);
877 if (leftsib == P_NONE)
879 elog(LOG, "no left sibling (concurrent deletion?)");
882 lbuf = _bt_getbuf(rel, leftsib, BT_WRITE);
883 page = BufferGetPage(lbuf);
884 opaque = (BTPageOpaque) PageGetSpecialPointer(page);
/* Target is leftmost on its level: no left sibling to fix up. */
888 lbuf = InvalidBuffer;
891 * Next write-lock the target page itself. It should be okay to take
892 * just a write lock not a superexclusive lock, since no scans would
893 * stop on an empty page.
895 buf = _bt_getbuf(rel, target, BT_WRITE);
896 page = BufferGetPage(buf);
897 opaque = (BTPageOpaque) PageGetSpecialPointer(page);
900 * Check page is still empty etc, else abandon deletion. The empty
901 * check is necessary since someone else might have inserted into it
902 * while we didn't have it locked; the others are just for paranoia's
905 if (P_RIGHTMOST(opaque) || P_ISROOT(opaque) || P_ISDELETED(opaque) ||
906 P_FIRSTDATAKEY(opaque) <= PageGetMaxOffsetNumber(page))
908 _bt_relbuf(rel, buf);
909 if (BufferIsValid(lbuf))
910 _bt_relbuf(rel, lbuf);
913 if (opaque->btpo_prev != leftsib)
914 elog(ERROR, "left link changed unexpectedly");
917 * And next write-lock the (current) right sibling.
919 rightsib = opaque->btpo_next;
920 rbuf = _bt_getbuf(rel, rightsib, BT_WRITE);
923 * Next find and write-lock the current parent of the target page.
924 * This is essentially the same as the corresponding step of
927 ItemPointerSet(&(stack->bts_btitem.bti_itup.t_tid),
929 pbuf = _bt_getstackbuf(rel, stack, BT_WRITE);
930 if (pbuf == InvalidBuffer)
931 elog(ERROR, "failed to re-find parent key in \"%s\"",
932 RelationGetRelationName(rel));
933 parent = stack->bts_blkno;
934 poffset = stack->bts_offset;
937 * If the target is the rightmost child of its parent, then we can't
938 * delete, unless it's also the only child --- in which case the
939 * parent changes to half-dead status.
941 page = BufferGetPage(pbuf);
942 opaque = (BTPageOpaque) PageGetSpecialPointer(page);
943 maxoff = PageGetMaxOffsetNumber(page);
944 parent_half_dead = false;
945 parent_one_child = false;
946 if (poffset >= maxoff)
948 if (poffset == P_FIRSTDATAKEY(opaque))
949 parent_half_dead = true;
/* Rightmost-but-not-only child: give up, releasing everything we hold. */
952 _bt_relbuf(rel, pbuf);
953 _bt_relbuf(rel, rbuf);
954 _bt_relbuf(rel, buf);
955 if (BufferIsValid(lbuf))
956 _bt_relbuf(rel, lbuf);
962 /* Will there be exactly one child left in this parent? */
963 if (OffsetNumberNext(P_FIRSTDATAKEY(opaque)) == maxoff)
964 parent_one_child = true;
968 * If we are deleting the next-to-last page on the target's level,
969 * then the rightsib is a candidate to become the new fast root. (In
970 * theory, it might be possible to push the fast root even further
971 * down, but the odds of doing so are slim, and the locking
972 * considerations daunting.)
974 * We can safely acquire a lock on the metapage here --- see comments for
977 if (leftsib == P_NONE)
979 page = BufferGetPage(rbuf);
980 opaque = (BTPageOpaque) PageGetSpecialPointer(page);
981 Assert(opaque->btpo.level == targetlevel);
982 if (P_RIGHTMOST(opaque))
984 /* rightsib will be the only one left on the level */
985 metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_WRITE);
986 metapg = BufferGetPage(metabuf);
987 metad = BTPageGetMeta(metapg);
990 * The expected case here is btm_fastlevel == targetlevel+1;
991 * if the fastlevel is <= targetlevel, something is wrong, and
992 * we choose to overwrite it to fix it.
994 if (metad->btm_fastlevel > targetlevel + 1)
996 /* no update wanted */
997 _bt_relbuf(rel, metabuf);
998 metabuf = InvalidBuffer;
1004 * Here we begin doing the deletion.
1007 /* No ereport(ERROR) until changes are logged */
1008 START_CRIT_SECTION();
1011 * Update parent. The normal case is a tad tricky because we want to
1012 * delete the target's downlink and the *following* key. Easiest way
1013 * is to copy the right sibling's downlink over the target downlink,
1014 * and then delete the following item.
1016 page = BufferGetPage(pbuf);
1017 opaque = (BTPageOpaque) PageGetSpecialPointer(page);
1018 if (parent_half_dead)
1020 PageIndexTupleDelete(page, poffset);
1021 opaque->btpo_flags |= BTP_HALF_DEAD;
1025 OffsetNumber nextoffset;
/* Redirect the target's downlink to point at the right sibling instead. */
1027 itemid = PageGetItemId(page, poffset);
1028 btitem = (BTItem) PageGetItem(page, itemid);
1029 Assert(ItemPointerGetBlockNumber(&(btitem->bti_itup.t_tid)) == target);
1030 ItemPointerSet(&(btitem->bti_itup.t_tid), rightsib, P_HIKEY);
1032 nextoffset = OffsetNumberNext(poffset);
1033 /* This part is just for double-checking */
1034 itemid = PageGetItemId(page, nextoffset);
1035 btitem = (BTItem) PageGetItem(page, itemid);
1036 if (ItemPointerGetBlockNumber(&(btitem->bti_itup.t_tid)) != rightsib)
1037 elog(PANIC, "right sibling is not next child");
1039 PageIndexTupleDelete(page, nextoffset);
1043 * Update siblings' side-links. Note the target page's side-links
1044 * will continue to point to the siblings.
1046 if (BufferIsValid(lbuf))
1048 page = BufferGetPage(lbuf);
1049 opaque = (BTPageOpaque) PageGetSpecialPointer(page);
1050 Assert(opaque->btpo_next == target);
1051 opaque->btpo_next = rightsib;
1053 page = BufferGetPage(rbuf);
1054 opaque = (BTPageOpaque) PageGetSpecialPointer(page);
1055 Assert(opaque->btpo_prev == target);
1056 opaque->btpo_prev = leftsib;
1057 rightsib_empty = (P_FIRSTDATAKEY(opaque) > PageGetMaxOffsetNumber(page));
1060 * Mark the page itself deleted. It can be recycled when all current
1061 * transactions are gone; or immediately if we're doing VACUUM FULL.
1063 page = BufferGetPage(buf);
1064 opaque = (BTPageOpaque) PageGetSpecialPointer(page);
1065 opaque->btpo_flags |= BTP_DELETED;
/* VACUUM FULL permits immediate recycling (FrozenTransactionId). */
1067 vacuum_full ? FrozenTransactionId : ReadNewTransactionId();
1069 /* And update the metapage, if needed */
1070 if (BufferIsValid(metabuf))
1072 metad->btm_fastroot = rightsib;
1073 metad->btm_fastlevel = targetlevel;
/* XLOG stuff: one record covers parent, siblings, target, and metapage. */
1077 if (!rel->rd_istemp)
1079 xl_btree_delete_page xlrec;
1080 xl_btree_metadata xlmeta;
1083 XLogRecData rdata[5];
1084 XLogRecData *nextrdata;
1086 xlrec.target.node = rel->rd_node;
1087 ItemPointerSet(&(xlrec.target.tid), parent, poffset);
1088 xlrec.deadblk = target;
1089 xlrec.leftblk = leftsib;
1090 xlrec.rightblk = rightsib;
1092 rdata[0].buffer = InvalidBuffer;
1093 rdata[0].data = (char *) &xlrec;
1094 rdata[0].len = SizeOfBtreeDeletePage;
1095 rdata[0].next = nextrdata = &(rdata[1]);
1097 if (BufferIsValid(metabuf))
1099 xlmeta.root = metad->btm_root;
1100 xlmeta.level = metad->btm_level;
1101 xlmeta.fastroot = metad->btm_fastroot;
1102 xlmeta.fastlevel = metad->btm_fastlevel;
1104 nextrdata->buffer = InvalidBuffer;
1105 nextrdata->data = (char *) &xlmeta;
1106 nextrdata->len = sizeof(xl_btree_metadata);
1107 nextrdata->next = nextrdata + 1;
1109 xlinfo = XLOG_BTREE_DELETE_PAGE_META;
1112 xlinfo = XLOG_BTREE_DELETE_PAGE;
/* Buffer-attached rdata entries carry no data; full-page images suffice. */
1114 nextrdata->buffer = pbuf;
1115 nextrdata->data = NULL;
1117 nextrdata->next = nextrdata + 1;
1120 nextrdata->buffer = rbuf;
1121 nextrdata->data = NULL;
1123 nextrdata->next = NULL;
1125 if (BufferIsValid(lbuf))
1127 nextrdata->next = nextrdata + 1;
1129 nextrdata->buffer = lbuf;
1130 nextrdata->data = NULL;
1132 nextrdata->next = NULL;
1135 recptr = XLogInsert(RM_BTREE_ID, xlinfo, rdata);
/* Stamp every modified page with the record's LSN. */
1137 if (BufferIsValid(metabuf))
1139 PageSetLSN(metapg, recptr);
1140 PageSetSUI(metapg, ThisStartUpID);
1142 page = BufferGetPage(pbuf);
1143 PageSetLSN(page, recptr);
1144 PageSetSUI(page, ThisStartUpID);
1145 page = BufferGetPage(rbuf);
1146 PageSetLSN(page, recptr);
1147 PageSetSUI(page, ThisStartUpID);
1148 page = BufferGetPage(buf);
1149 PageSetLSN(page, recptr);
1150 PageSetSUI(page, ThisStartUpID);
1151 if (BufferIsValid(lbuf))
1153 page = BufferGetPage(lbuf);
1154 PageSetLSN(page, recptr);
1155 PageSetSUI(page, ThisStartUpID);
1161 /* Write and release buffers */
1162 if (BufferIsValid(metabuf))
1163 _bt_wrtbuf(rel, metabuf);
1164 _bt_wrtbuf(rel, pbuf);
1165 _bt_wrtbuf(rel, rbuf);
1166 _bt_wrtbuf(rel, buf);
1167 if (BufferIsValid(lbuf))
1168 _bt_wrtbuf(rel, lbuf);
1171 * If parent became half dead, recurse to try to delete it. Otherwise,
1172 * if right sibling is empty and is now the last child of the parent,
1173 * recurse to try to delete it. (These cases cannot apply at the same
1174 * time, though the second case might itself recurse to the first.)
1176 if (parent_half_dead)
1178 buf = _bt_getbuf(rel, parent, BT_READ);
1179 return _bt_pagedel(rel, buf, vacuum_full) + 1;
1181 if (parent_one_child && rightsib_empty)
1183 buf = _bt_getbuf(rel, rightsib, BT_READ);
1184 return _bt_pagedel(rel, buf, vacuum_full) + 1;