1 /*-------------------------------------------------------------------------
4 * BTree-specific page management code for the Postgres btree access method.
7 * Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
8 * Portions Copyright (c) 1994, Regents of the University of California
12 * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtpage.c,v 1.88 2005/10/15 02:49:09 momjian Exp $
15 * Postgres btree pages look like ordinary relation pages. The opaque
16 * data at high addresses includes pointers to left and right siblings
17 * and flag data describing page state. The first page in a btree, page
18 * zero, is special -- it stores meta-information describing the tree.
19 * Pages one and higher store the actual tree data.
21 *-------------------------------------------------------------------------
25 #include "access/nbtree.h"
26 #include "miscadmin.h"
27 #include "storage/freespace.h"
28 #include "storage/lmgr.h"
32 * _bt_metapinit() -- Initialize the metadata page of a new btree.
34 * Note: this is actually not used for standard btree index building;
35 * nbtsort.c prefers not to make the metadata page valid until completion
38 * Note: there's no real need for any locking here. Since the transaction
39 * creating the index hasn't committed yet, no one else can even see the index
40 * much less be trying to use it. (In a REINDEX-in-place scenario, that's
41 * not true, but we assume the caller holds sufficient locks on the index.)
44 _bt_metapinit(Relation rel)
48 BTMetaPageData *metad;
/* A brand-new btree must have no pages at all before we add the metapage. */
50 if (RelationGetNumberOfBlocks(rel) != 0)
51 elog(ERROR, "cannot initialize non-empty btree index \"%s\"",
52 RelationGetRelationName(rel));
/*
 * Allocate the very first page; since the relation is empty it must come
 * back as block 0, which is by definition the metapage.
 */
54 buf = ReadBuffer(rel, P_NEW);
55 Assert(BufferGetBlockNumber(buf) == BTREE_METAPAGE);
56 pg = BufferGetPage(buf);
/* Fill in the metapage image: no root page yet, tree level 0. */
58 _bt_initmetapage(pg, P_NONE, 0);
59 metad = BTPageGetMeta(pg);
61 /* NO ELOG(ERROR) from here till newmeta op is logged */
/* Build a NEWMETA WAL record mirroring the metapage contents just written. */
67 xl_btree_newmeta xlrec;
71 xlrec.node = rel->rd_node;
72 xlrec.meta.root = metad->btm_root;
73 xlrec.meta.level = metad->btm_level;
74 xlrec.meta.fastroot = metad->btm_fastroot;
75 xlrec.meta.fastlevel = metad->btm_fastlevel;
77 rdata[0].data = (char *) &xlrec;
78 rdata[0].len = SizeOfBtreeNewmeta;
79 rdata[0].buffer = InvalidBuffer;
82 recptr = XLogInsert(RM_BTREE_ID,
/* Stamp the metapage with the WAL record's LSN/timeline so replay can tell
 * whether this change has already been applied. */
86 PageSetLSN(pg, recptr);
87 PageSetTLI(pg, ThisTimeLineID);
96 * _bt_initmetapage() -- Fill a page buffer with a correct metapage image
99 _bt_initmetapage(Page page, BlockNumber rootbknum, uint32 level)
101 BTMetaPageData *metad;
102 BTPageOpaque metaopaque;
104 _bt_pageinit(page, BLCKSZ);
106 metad = BTPageGetMeta(page);
107 metad->btm_magic = BTREE_MAGIC;
108 metad->btm_version = BTREE_VERSION;
109 metad->btm_root = rootbknum;
110 metad->btm_level = level;
111 metad->btm_fastroot = rootbknum;
112 metad->btm_fastlevel = level;
114 metaopaque = (BTPageOpaque) PageGetSpecialPointer(page);
115 metaopaque->btpo_flags = BTP_META;
118 * Set pd_lower just past the end of the metadata. This is not essential
119 * but it makes the page look compressible to xlog.c.
121 ((PageHeader) page)->pd_lower =
122 ((char *) metad + sizeof(BTMetaPageData)) - (char *) page;
126 * _bt_getroot() -- Get the root page of the btree.
128 * Since the root page can move around the btree file, we have to read
129 * its location from the metadata page, and then read the root page
130 * itself. If no root page exists yet, we have to create one. The
131 * standard class of race conditions exists here; I think I covered
132 * them all in the Hopi Indian rain dance of lock requests below.
134 * The access type parameter (BT_READ or BT_WRITE) controls whether
135 * a new root page will be created or not. If access = BT_READ,
136 * and no root page exists, we just return InvalidBuffer. For
137 * BT_WRITE, we try to create the root page if it doesn't exist.
138 * NOTE that the returned root page will have only a read lock set
139 * on it even if access = BT_WRITE!
141 * The returned page is not necessarily the true root --- it could be
142 * a "fast root" (a page that is alone in its level due to deletions).
143 * Also, if the root page is split while we are "in flight" to it,
144 * what we will return is the old root, which is now just the leftmost
145 * page on a probably-not-very-wide level. For most purposes this is
146 * as good as or better than the true root, so we do not bother to
147 * insist on finding the true root. We do, however, guarantee to
148 * return a live (not deleted or half-dead) page.
150 * On successful return, the root page is pinned and read-locked.
151 * The metadata page is not locked or pinned on exit.
154 _bt_getroot(Relation rel, int access)
158 BTPageOpaque metaopaque;
161 BTPageOpaque rootopaque;
162 BlockNumber rootblkno;
164 BTMetaPageData *metad;
/* Read-lock the metapage to learn where the (fast) root currently is. */
166 metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_READ);
167 metapg = BufferGetPage(metabuf);
168 metaopaque = (BTPageOpaque) PageGetSpecialPointer(metapg);
169 metad = BTPageGetMeta(metapg);
171 /* sanity-check the metapage */
172 if (!(metaopaque->btpo_flags & BTP_META) ||
173 metad->btm_magic != BTREE_MAGIC)
175 (errcode(ERRCODE_INDEX_CORRUPTED),
176 errmsg("index \"%s\" is not a btree",
177 RelationGetRelationName(rel))));
179 if (metad->btm_version != BTREE_VERSION)
181 (errcode(ERRCODE_INDEX_CORRUPTED),
182 errmsg("version mismatch in index \"%s\": file version %d, code version %d",
183 RelationGetRelationName(rel),
184 metad->btm_version, BTREE_VERSION)));
186 /* if no root page initialized yet, do it */
187 if (metad->btm_root == P_NONE)
189 /* If access = BT_READ, caller doesn't want us to create root yet */
190 if (access == BT_READ)
192 _bt_relbuf(rel, metabuf);
193 return InvalidBuffer;
196 /* trade in our read lock for a write lock */
197 LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
198 LockBuffer(metabuf, BT_WRITE);
201 * Race condition: if someone else initialized the metadata between
202 * the time we released the read lock and acquired the write lock, we
203 * must avoid doing it again.
205 if (metad->btm_root != P_NONE)
208 * Metadata initialized by someone else. In order to guarantee no
209 * deadlocks, we have to release the metadata page and start all
210 * over again. (Is that really true? But it's hardly worth trying
211 * to optimize this case.)
213 _bt_relbuf(rel, metabuf);
214 return _bt_getroot(rel, access);
218 * Get, initialize, write, and leave a lock of the appropriate type on
219 * the new root page. Since this is the first page in the tree, it's
220 * a leaf as well as the root.
222 rootbuf = _bt_getbuf(rel, P_NEW, BT_WRITE);
223 rootblkno = BufferGetBlockNumber(rootbuf);
224 rootpage = BufferGetPage(rootbuf);
/* The new root is a leaf with no siblings, at level 0. */
226 _bt_pageinit(rootpage, BufferGetPageSize(rootbuf));
227 rootopaque = (BTPageOpaque) PageGetSpecialPointer(rootpage);
228 rootopaque->btpo_prev = rootopaque->btpo_next = P_NONE;
229 rootopaque->btpo_flags = (BTP_LEAF | BTP_ROOT);
230 rootopaque->btpo.level = 0;
232 /* NO ELOG(ERROR) till meta is updated */
233 START_CRIT_SECTION();
/* Point both the true root and the fast root at the new page. */
235 metad->btm_root = rootblkno;
236 metad->btm_level = 0;
237 metad->btm_fastroot = rootblkno;
238 metad->btm_fastlevel = 0;
/* WAL-log the root creation; one record covers root page and metapage. */
243 xl_btree_newroot xlrec;
247 xlrec.node = rel->rd_node;
248 xlrec.rootblk = rootblkno;
251 rdata.data = (char *) &xlrec;
252 rdata.len = SizeOfBtreeNewroot;
253 rdata.buffer = InvalidBuffer;
256 recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_NEWROOT, &rdata);
258 PageSetLSN(rootpage, recptr);
259 PageSetTLI(rootpage, ThisTimeLineID);
260 PageSetLSN(metapg, recptr);
261 PageSetTLI(metapg, ThisTimeLineID);
/* Mark the root dirty but keep our pin and lock on it. */
266 _bt_wrtnorelbuf(rel, rootbuf);
269 * swap root write lock for read lock. There is no danger of anyone
270 * else accessing the new root page while it's unlocked, since no one
271 * else knows where it is yet.
273 LockBuffer(rootbuf, BUFFER_LOCK_UNLOCK);
274 LockBuffer(rootbuf, BT_READ);
276 /* okay, metadata is correct, write and release it */
277 _bt_wrtbuf(rel, metabuf);
/* Normal case: a root already exists; follow the fast-root link. */
281 rootblkno = metad->btm_fastroot;
282 Assert(rootblkno != P_NONE);
283 rootlevel = metad->btm_fastlevel;
286 * We are done with the metapage; arrange to release it via first
287 * _bt_relandgetbuf call
293 rootbuf = _bt_relandgetbuf(rel, rootbuf, rootblkno, BT_READ);
294 rootpage = BufferGetPage(rootbuf);
295 rootopaque = (BTPageOpaque) PageGetSpecialPointer(rootpage);
/* If the page we landed on is dead/half-dead, keep moving right until we
 * find a live page (guaranteed by the deletion protocol). */
297 if (!P_IGNORE(rootopaque))
300 /* it's dead, Jim. step right one page */
301 if (P_RIGHTMOST(rootopaque))
302 elog(ERROR, "no live root page found in \"%s\"",
303 RelationGetRelationName(rel));
304 rootblkno = rootopaque->btpo_next;
307 /* Note: can't check btpo.level on deleted pages */
308 if (rootopaque->btpo.level != rootlevel)
309 elog(ERROR, "root page %u of \"%s\" has level %u, expected %u",
310 rootblkno, RelationGetRelationName(rel),
311 rootopaque->btpo.level, rootlevel);
315 * By here, we have a pin and read lock on the root page, and no lock set
316 * on the metadata page. Return the root page's buffer.
322 * _bt_gettrueroot() -- Get the true root page of the btree.
324 * This is the same as the BT_READ case of _bt_getroot(), except
325 * we follow the true-root link not the fast-root link.
327 * By the time we acquire lock on the root page, it might have been split and
328 * not be the true root anymore. This is okay for the present uses of this
329 * routine; we only really need to be able to move up at least one tree level
330 * from whatever non-root page we were at. If we ever do need to lock the
331 * one true root page, we could loop here, re-reading the metapage on each
332 * failure. (Note that it wouldn't do to hold the lock on the metapage while
333 * moving to the root --- that'd deadlock against any concurrent root split.)
336 _bt_gettrueroot(Relation rel)
340 BTPageOpaque metaopaque;
343 BTPageOpaque rootopaque;
344 BlockNumber rootblkno;
346 BTMetaPageData *metad;
/* Read-lock the metapage to find the true (not fast) root location. */
348 metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_READ);
349 metapg = BufferGetPage(metabuf);
350 metaopaque = (BTPageOpaque) PageGetSpecialPointer(metapg);
351 metad = BTPageGetMeta(metapg);
/* Same metapage sanity checks as _bt_getroot. */
353 if (!(metaopaque->btpo_flags & BTP_META) ||
354 metad->btm_magic != BTREE_MAGIC)
356 (errcode(ERRCODE_INDEX_CORRUPTED),
357 errmsg("index \"%s\" is not a btree",
358 RelationGetRelationName(rel))));
360 if (metad->btm_version != BTREE_VERSION)
362 (errcode(ERRCODE_INDEX_CORRUPTED),
363 errmsg("version mismatch in index \"%s\": file version %d, code version %d",
364 RelationGetRelationName(rel),
365 metad->btm_version, BTREE_VERSION)));
367 /* if no root page initialized yet, fail */
368 if (metad->btm_root == P_NONE)
370 _bt_relbuf(rel, metabuf);
371 return InvalidBuffer;
/* Unlike _bt_getroot, follow btm_root/btm_level, not the fast-root links. */
374 rootblkno = metad->btm_root;
375 rootlevel = metad->btm_level;
378 * We are done with the metapage; arrange to release it via first
379 * _bt_relandgetbuf call
385 rootbuf = _bt_relandgetbuf(rel, rootbuf, rootblkno, BT_READ);
386 rootpage = BufferGetPage(rootbuf);
387 rootopaque = (BTPageOpaque) PageGetSpecialPointer(rootpage);
/* Step right past any dead pages, as in _bt_getroot. */
389 if (!P_IGNORE(rootopaque))
392 /* it's dead, Jim. step right one page */
393 if (P_RIGHTMOST(rootopaque))
394 elog(ERROR, "no live root page found in \"%s\"",
395 RelationGetRelationName(rel));
396 rootblkno = rootopaque->btpo_next;
399 /* Note: can't check btpo.level on deleted pages */
400 if (rootopaque->btpo.level != rootlevel)
401 elog(ERROR, "root page %u of \"%s\" has level %u, expected %u",
402 rootblkno, RelationGetRelationName(rel),
403 rootopaque->btpo.level, rootlevel);
409 * _bt_getbuf() -- Get a buffer by block number for read or write.
411 * blkno == P_NEW means to get an unallocated index page.
413 * When this routine returns, the appropriate lock is set on the
414 * requested buffer and its reference count has been incremented
415 * (ie, the buffer is "locked and pinned").
418 _bt_getbuf(Relation rel, BlockNumber blkno, int access)
424 /* Read an existing block of the relation */
425 buf = ReadBuffer(rel, blkno);
426 LockBuffer(buf, access);
/* P_NEW path: new pages are only ever requested with write intent. */
433 Assert(access == BT_WRITE);
436 * First see if the FSM knows of any free pages.
438 * We can't trust the FSM's report unreservedly; we have to check that
439 * the page is still free. (For example, an already-free page could
440 * have been re-used between the time the last VACUUM scanned it and
441 * the time the VACUUM made its FSM updates.)
443 * In fact, it's worse than that: we can't even assume that it's safe to
444 * take a lock on the reported page. If somebody else has a lock on
445 * it, or even worse our own caller does, we could deadlock. (The
446 * own-caller scenario is actually not improbable. Consider an index
447 * on a serial or timestamp column. Nearly all splits will be at the
448 * rightmost page, so it's entirely likely that _bt_split will call us
449 * while holding a lock on the page most recently acquired from FSM.
450 * A VACUUM running concurrently with the previous split could well
451 * have placed that page back in FSM.)
453 * To get around that, we ask for only a conditional lock on the reported
454 * page. If we fail, then someone else is using the page, and we may
455 * reasonably assume it's not free. (If we happen to be wrong, the
456 * worst consequence is the page will be lost to use till the next
457 * VACUUM, which is no big problem.)
461 blkno = GetFreeIndexPage(&rel->rd_node);
462 if (blkno == InvalidBlockNumber)
464 buf = ReadBuffer(rel, blkno);
/* Conditional lock only -- see deadlock discussion above. */
465 if (ConditionalLockBuffer(buf))
467 page = BufferGetPage(buf);
/* Re-verify the page really is reusable before clobbering it. */
468 if (_bt_page_recyclable(page))
470 /* Okay to use page. Re-initialize and return it */
471 _bt_pageinit(page, BufferGetPageSize(buf));
474 elog(DEBUG2, "FSM returned nonrecyclable page");
475 _bt_relbuf(rel, buf);
479 elog(DEBUG2, "FSM returned nonlockable page");
480 /* couldn't get lock, so just drop pin */
486 * Extend the relation by one page.
488 * We have to use a lock to ensure no one else is extending the rel at
489 * the same time, else we will both try to initialize the same new
490 * page. We can skip locking for new or temp relations, however,
491 * since no one else could be accessing them.
493 needLock = !RELATION_IS_LOCAL(rel);
496 LockRelationForExtension(rel, ExclusiveLock);
498 buf = ReadBuffer(rel, P_NEW);
500 /* Acquire buffer lock on new page */
501 LockBuffer(buf, BT_WRITE);
504 * Release the file-extension lock; it's now OK for someone else to
505 * extend the relation some more. Note that we cannot release this
506 * lock before we have buffer lock on the new page, or we risk a race
507 * condition against btvacuumcleanup --- see comments therein.
510 UnlockRelationForExtension(rel, ExclusiveLock);
512 /* Initialize the new page before returning it */
513 page = BufferGetPage(buf);
514 Assert(PageIsNew((PageHeader) page));
515 _bt_pageinit(page, BufferGetPageSize(buf));
518 /* ref count and lock type are correct */
523 * _bt_relandgetbuf() -- release a locked buffer and get another one.
525 * This is equivalent to _bt_relbuf followed by _bt_getbuf, with the
526 * exception that blkno may not be P_NEW. Also, if obuf is InvalidBuffer
527 * then it reduces to just _bt_getbuf; allowing this case simplifies some
528 * callers. The motivation for using this is to avoid two entries to the
529 * bufmgr when one will do.
532 _bt_relandgetbuf(Relation rel, Buffer obuf, BlockNumber blkno, int access)
536 Assert(blkno != P_NEW);
537 if (BufferIsValid(obuf))
538 LockBuffer(obuf, BUFFER_LOCK_UNLOCK);
539 buf = ReleaseAndReadBuffer(obuf, rel, blkno);
540 LockBuffer(buf, access);
545 * _bt_relbuf() -- release a locked buffer.
547 * Lock and pin (refcount) are both dropped. Note that either read or
548 * write lock can be dropped this way, but if we modified the buffer,
549 * this is NOT the right way to release a write lock.
552 _bt_relbuf(Relation rel, Buffer buf)
554 LockBuffer(buf, BUFFER_LOCK_UNLOCK);
559 * _bt_wrtbuf() -- write a btree page to disk.
561 * This routine releases the lock held on the buffer and our refcount
562 * for it. It is an error to call _bt_wrtbuf() without a write lock
563 * and a pin on the buffer.
565 * NOTE: actually, the buffer manager just marks the shared buffer page
566 * dirty here; the real I/O happens later. This is okay since we are not
567 * relying on write ordering anyway. The WAL mechanism is responsible for
568 * guaranteeing correctness after a crash.
571 _bt_wrtbuf(Relation rel, Buffer buf)
573 LockBuffer(buf, BUFFER_LOCK_UNLOCK);
578 * _bt_wrtnorelbuf() -- write a btree page to disk, but do not release
579 * our reference or lock.
581 * It is an error to call _bt_wrtnorelbuf() without a write lock
582 * and a pin on the buffer.
587 _bt_wrtnorelbuf(Relation rel, Buffer buf)
589 WriteNoReleaseBuffer(buf);
593 * _bt_pageinit() -- Initialize a new page.
595 * On return, the page header is initialized; data space is empty;
596 * special space is zeroed out.
599 _bt_pageinit(Page page, Size size)
601 PageInit(page, size, sizeof(BTPageOpaqueData));
605 * _bt_page_recyclable() -- Is an existing page recyclable?
607 * This exists to make sure _bt_getbuf and btvacuumcleanup have the same
608 * policy about whether a page is safe to re-use.
611 _bt_page_recyclable(Page page)
616 * It's possible to find an all-zeroes page in an index --- for example, a
617 * backend might successfully extend the relation one page and then crash
618 * before it is able to make a WAL entry for adding the page. If we find a
619 * zeroed page then reclaim it.
625 * Otherwise, recycle if deleted and too old to have any processes
628 opaque = (BTPageOpaque) PageGetSpecialPointer(page);
629 if (P_ISDELETED(opaque) &&
630 TransactionIdPrecedesOrEquals(opaque->btpo.xact, RecentXmin))
636 * Delete item(s) from a btree page.
638 * This must only be used for deleting leaf items. Deleting an item on a
639 * non-leaf page has to be done as part of an atomic action that includes
640 * deleting the page it points to.
642 * This routine assumes that the caller has pinned and locked the buffer,
643 * and will write the buffer afterwards. Also, the given itemnos *must*
644 * appear in increasing order in the array.
647 _bt_delitems(Relation rel, Buffer buf,
648 OffsetNumber *itemnos, int nitems)
650 Page page = BufferGetPage(buf);
652 /* No ereport(ERROR) until changes are logged */
653 START_CRIT_SECTION();
/* Remove the given leaf items in one pass; itemnos must be ascending. */
656 PageIndexMultiDelete(page, itemnos, nitems);
/* WAL-log the deletion: header record plus the offsets array. */
661 xl_btree_delete xlrec;
663 XLogRecData rdata[2];
665 xlrec.node = rel->rd_node;
666 xlrec.block = BufferGetBlockNumber(buf);
668 rdata[0].data = (char *) &xlrec;
669 rdata[0].len = SizeOfBtreeDelete;
670 rdata[0].buffer = InvalidBuffer;
671 rdata[0].next = &(rdata[1]);
674 * The target-offsets array is not in the buffer, but pretend that it
675 * is. When XLogInsert stores the whole buffer, the offsets array
676 * need not be stored too.
680 rdata[1].data = (char *) itemnos;
681 rdata[1].len = nitems * sizeof(OffsetNumber);
/* When the full-page image is logged instead, no payload is needed here. */
685 rdata[1].data = NULL;
688 rdata[1].buffer = buf;
689 rdata[1].buffer_std = true;
690 rdata[1].next = NULL;
692 recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_DELETE, rdata);
/* Stamp the page so replay can detect whether this change was applied. */
694 PageSetLSN(page, recptr);
695 PageSetTLI(page, ThisTimeLineID);
702 * _bt_pagedel() -- Delete a page from the b-tree.
704 * This action unlinks the page from the b-tree structure, removing all
705 * pointers leading to it --- but not touching its own left and right links.
706 * The page cannot be physically reclaimed right away, since other processes
707 * may currently be trying to follow links leading to the page; they have to
708 * be allowed to use its right-link to recover. See nbtree/README.
710 * On entry, the target buffer must be pinned and read-locked. This lock and
711 * pin will be dropped before exiting.
713 * Returns the number of pages successfully deleted (zero on failure; could
714 * be more than one if parent blocks were deleted).
716 * NOTE: this leaks memory. Rather than trying to clean up everything
717 * carefully, it's better to run it in a temp context that can be reset
721 _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
727 OffsetNumber poffset,
734 ScanKey itup_scankey;
739 bool parent_half_dead;
740 bool parent_one_child;
742 Buffer metabuf = InvalidBuffer;
744 BTMetaPageData *metad = NULL;
749 * We can never delete rightmost pages nor root pages. While at it, check
750 * that page is not already deleted and is empty.
752 page = BufferGetPage(buf);
753 opaque = (BTPageOpaque) PageGetSpecialPointer(page);
754 if (P_RIGHTMOST(opaque) || P_ISROOT(opaque) || P_ISDELETED(opaque) ||
755 P_FIRSTDATAKEY(opaque) <= PageGetMaxOffsetNumber(page))
757 _bt_relbuf(rel, buf);
762 * Save info about page, including a copy of its high key (it must have
763 * one, being non-rightmost).
765 target = BufferGetBlockNumber(buf);
766 targetlevel = opaque->btpo.level;
767 leftsib = opaque->btpo_prev;
768 itemid = PageGetItemId(page, P_HIKEY);
769 targetkey = CopyBTItem((BTItem) PageGetItem(page, itemid));
772 * We need to get an approximate pointer to the page's parent page. Use
773 * the standard search mechanism to search for the page's high key; this
774 * will give us a link to either the current parent or someplace to its
775 * left (if there are multiple equal high keys). To avoid deadlocks, we'd
776 * better drop the target page lock first.
778 _bt_relbuf(rel, buf);
779 /* we need a scan key to do our search, so build one */
780 itup_scankey = _bt_mkscankey(rel, &(targetkey->bti_itup));
781 /* find the leftmost leaf page containing this key */
782 stack = _bt_search(rel, rel->rd_rel->relnatts, itup_scankey, false,
784 /* don't need a pin on that either */
785 _bt_relbuf(rel, lbuf);
788 * If we are trying to delete an interior page, _bt_search did more than
789 * we needed. Locate the stack item pointing to our parent level.
795 elog(ERROR, "not enough stack items");
796 if (ilevel == targetlevel)
798 stack = stack->bts_parent;
803 * We have to lock the pages we need to modify in the standard order:
804 * moving right, then up. Else we will deadlock against other writers.
806 * So, we need to find and write-lock the current left sibling of the target
807 * page. The sibling that was current a moment ago could have split, so
808 * we may have to move right. This search could fail if either the
809 * sibling or the target page was deleted by someone else meanwhile; if
810 * so, give up. (Right now, that should never happen, since page deletion
811 * is only done in VACUUM and there shouldn't be multiple VACUUMs
812 * concurrently on the same table.)
814 if (leftsib != P_NONE)
816 lbuf = _bt_getbuf(rel, leftsib, BT_WRITE);
817 page = BufferGetPage(lbuf);
818 opaque = (BTPageOpaque) PageGetSpecialPointer(page);
819 while (P_ISDELETED(opaque) || opaque->btpo_next != target)
821 /* step right one page */
822 leftsib = opaque->btpo_next;
823 _bt_relbuf(rel, lbuf);
824 if (leftsib == P_NONE)
826 elog(LOG, "no left sibling (concurrent deletion?) in \"%s\"",
827 RelationGetRelationName(rel));
830 lbuf = _bt_getbuf(rel, leftsib, BT_WRITE);
831 page = BufferGetPage(lbuf);
832 opaque = (BTPageOpaque) PageGetSpecialPointer(page);
836 lbuf = InvalidBuffer;
839 * Next write-lock the target page itself. It should be okay to take just
840 * a write lock not a superexclusive lock, since no scans would stop on an
843 buf = _bt_getbuf(rel, target, BT_WRITE);
844 page = BufferGetPage(buf);
845 opaque = (BTPageOpaque) PageGetSpecialPointer(page);
848 * Check page is still empty etc, else abandon deletion. The empty check
849 * is necessary since someone else might have inserted into it while we
850 * didn't have it locked; the others are just for paranoia's sake.
852 if (P_RIGHTMOST(opaque) || P_ISROOT(opaque) || P_ISDELETED(opaque) ||
853 P_FIRSTDATAKEY(opaque) <= PageGetMaxOffsetNumber(page))
855 _bt_relbuf(rel, buf);
856 if (BufferIsValid(lbuf))
857 _bt_relbuf(rel, lbuf);
860 if (opaque->btpo_prev != leftsib)
861 elog(ERROR, "left link changed unexpectedly in block %u of \"%s\"",
862 target, RelationGetRelationName(rel));
865 * And next write-lock the (current) right sibling.
867 rightsib = opaque->btpo_next;
868 rbuf = _bt_getbuf(rel, rightsib, BT_WRITE);
871 * Next find and write-lock the current parent of the target page. This is
872 * essentially the same as the corresponding step of splitting.
874 ItemPointerSet(&(stack->bts_btitem.bti_itup.t_tid),
876 pbuf = _bt_getstackbuf(rel, stack, BT_WRITE);
877 if (pbuf == InvalidBuffer)
878 elog(ERROR, "failed to re-find parent key in \"%s\"",
879 RelationGetRelationName(rel));
880 parent = stack->bts_blkno;
881 poffset = stack->bts_offset;
884 * If the target is the rightmost child of its parent, then we can't
885 * delete, unless it's also the only child --- in which case the parent
886 * changes to half-dead status.
888 page = BufferGetPage(pbuf);
889 opaque = (BTPageOpaque) PageGetSpecialPointer(page);
890 maxoff = PageGetMaxOffsetNumber(page);
891 parent_half_dead = false;
892 parent_one_child = false;
893 if (poffset >= maxoff)
895 if (poffset == P_FIRSTDATAKEY(opaque))
896 parent_half_dead = true;
899 _bt_relbuf(rel, pbuf);
900 _bt_relbuf(rel, rbuf);
901 _bt_relbuf(rel, buf);
902 if (BufferIsValid(lbuf))
903 _bt_relbuf(rel, lbuf);
909 /* Will there be exactly one child left in this parent? */
910 if (OffsetNumberNext(P_FIRSTDATAKEY(opaque)) == maxoff)
911 parent_one_child = true;
915 * If we are deleting the next-to-last page on the target's level, then
916 * the rightsib is a candidate to become the new fast root. (In theory, it
917 * might be possible to push the fast root even further down, but the odds
918 * of doing so are slim, and the locking considerations daunting.)
920 * We can safely acquire a lock on the metapage here --- see comments for
923 if (leftsib == P_NONE)
925 page = BufferGetPage(rbuf);
926 opaque = (BTPageOpaque) PageGetSpecialPointer(page);
927 Assert(opaque->btpo.level == targetlevel);
928 if (P_RIGHTMOST(opaque))
930 /* rightsib will be the only one left on the level */
931 metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_WRITE);
932 metapg = BufferGetPage(metabuf);
933 metad = BTPageGetMeta(metapg);
936 * The expected case here is btm_fastlevel == targetlevel+1; if
937 * the fastlevel is <= targetlevel, something is wrong, and we
938 * choose to overwrite it to fix it.
940 if (metad->btm_fastlevel > targetlevel + 1)
942 /* no update wanted */
943 _bt_relbuf(rel, metabuf);
944 metabuf = InvalidBuffer;
950 * Here we begin doing the deletion.
953 /* No ereport(ERROR) until changes are logged */
954 START_CRIT_SECTION();
957 * Update parent. The normal case is a tad tricky because we want to
958 * delete the target's downlink and the *following* key. Easiest way is
959 * to copy the right sibling's downlink over the target downlink, and then
960 * delete the following item.
962 page = BufferGetPage(pbuf);
963 opaque = (BTPageOpaque) PageGetSpecialPointer(page);
964 if (parent_half_dead)
966 PageIndexTupleDelete(page, poffset);
967 opaque->btpo_flags |= BTP_HALF_DEAD;
971 OffsetNumber nextoffset;
973 itemid = PageGetItemId(page, poffset);
974 btitem = (BTItem) PageGetItem(page, itemid);
975 Assert(ItemPointerGetBlockNumber(&(btitem->bti_itup.t_tid)) == target);
976 ItemPointerSet(&(btitem->bti_itup.t_tid), rightsib, P_HIKEY);
978 nextoffset = OffsetNumberNext(poffset);
979 /* This part is just for double-checking */
980 itemid = PageGetItemId(page, nextoffset);
981 btitem = (BTItem) PageGetItem(page, itemid);
982 if (ItemPointerGetBlockNumber(&(btitem->bti_itup.t_tid)) != rightsib)
983 elog(PANIC, "right sibling is not next child in \"%s\"",
984 RelationGetRelationName(rel));
985 PageIndexTupleDelete(page, nextoffset);
989 * Update siblings' side-links. Note the target page's side-links will
990 * continue to point to the siblings.
992 if (BufferIsValid(lbuf))
994 page = BufferGetPage(lbuf);
995 opaque = (BTPageOpaque) PageGetSpecialPointer(page);
996 Assert(opaque->btpo_next == target);
997 opaque->btpo_next = rightsib;
999 page = BufferGetPage(rbuf);
1000 opaque = (BTPageOpaque) PageGetSpecialPointer(page);
1001 Assert(opaque->btpo_prev == target);
1002 opaque->btpo_prev = leftsib;
1003 rightsib_empty = (P_FIRSTDATAKEY(opaque) > PageGetMaxOffsetNumber(page));
1006 * Mark the page itself deleted. It can be recycled when all current
1007 * transactions are gone; or immediately if we're doing VACUUM FULL.
1009 page = BufferGetPage(buf);
1010 opaque = (BTPageOpaque) PageGetSpecialPointer(page);
1011 opaque->btpo_flags |= BTP_DELETED;
1013 vacuum_full ? FrozenTransactionId : ReadNewTransactionId();
1015 /* And update the metapage, if needed */
1016 if (BufferIsValid(metabuf))
1018 metad->btm_fastroot = rightsib;
1019 metad->btm_fastlevel = targetlevel;
1023 if (!rel->rd_istemp)
1025 xl_btree_delete_page xlrec;
1026 xl_btree_metadata xlmeta;
1029 XLogRecData rdata[5];
1030 XLogRecData *nextrdata;
1032 xlrec.target.node = rel->rd_node;
1033 ItemPointerSet(&(xlrec.target.tid), parent, poffset);
1034 xlrec.deadblk = target;
1035 xlrec.leftblk = leftsib;
1036 xlrec.rightblk = rightsib;
1038 rdata[0].data = (char *) &xlrec;
1039 rdata[0].len = SizeOfBtreeDeletePage;
1040 rdata[0].buffer = InvalidBuffer;
1041 rdata[0].next = nextrdata = &(rdata[1]);
1043 if (BufferIsValid(metabuf))
1045 xlmeta.root = metad->btm_root;
1046 xlmeta.level = metad->btm_level;
1047 xlmeta.fastroot = metad->btm_fastroot;
1048 xlmeta.fastlevel = metad->btm_fastlevel;
1050 nextrdata->data = (char *) &xlmeta;
1051 nextrdata->len = sizeof(xl_btree_metadata);
1052 nextrdata->buffer = InvalidBuffer;
1053 nextrdata->next = nextrdata + 1;
1055 xlinfo = XLOG_BTREE_DELETE_PAGE_META;
1058 xlinfo = XLOG_BTREE_DELETE_PAGE;
1060 nextrdata->data = NULL;
1062 nextrdata->next = nextrdata + 1;
1063 nextrdata->buffer = pbuf;
1064 nextrdata->buffer_std = true;
1067 nextrdata->data = NULL;
1069 nextrdata->buffer = rbuf;
1070 nextrdata->buffer_std = true;
1071 nextrdata->next = NULL;
1073 if (BufferIsValid(lbuf))
1075 nextrdata->next = nextrdata + 1;
1077 nextrdata->data = NULL;
1079 nextrdata->buffer = lbuf;
1080 nextrdata->buffer_std = true;
1081 nextrdata->next = NULL;
1084 recptr = XLogInsert(RM_BTREE_ID, xlinfo, rdata);
1086 if (BufferIsValid(metabuf))
1088 PageSetLSN(metapg, recptr);
1089 PageSetTLI(metapg, ThisTimeLineID);
1091 page = BufferGetPage(pbuf);
1092 PageSetLSN(page, recptr);
1093 PageSetTLI(page, ThisTimeLineID);
1094 page = BufferGetPage(rbuf);
1095 PageSetLSN(page, recptr);
1096 PageSetTLI(page, ThisTimeLineID);
1097 page = BufferGetPage(buf);
1098 PageSetLSN(page, recptr);
1099 PageSetTLI(page, ThisTimeLineID);
1100 if (BufferIsValid(lbuf))
1102 page = BufferGetPage(lbuf);
1103 PageSetLSN(page, recptr);
1104 PageSetTLI(page, ThisTimeLineID);
1110 /* Write and release buffers */
1111 if (BufferIsValid(metabuf))
1112 _bt_wrtbuf(rel, metabuf);
1113 _bt_wrtbuf(rel, pbuf);
1114 _bt_wrtbuf(rel, rbuf);
1115 _bt_wrtbuf(rel, buf);
1116 if (BufferIsValid(lbuf))
1117 _bt_wrtbuf(rel, lbuf);
1120 * If parent became half dead, recurse to try to delete it. Otherwise, if
1121 * right sibling is empty and is now the last child of the parent, recurse
1122 * to try to delete it. (These cases cannot apply at the same time,
1123 * though the second case might itself recurse to the first.)
1125 if (parent_half_dead)
1127 buf = _bt_getbuf(rel, parent, BT_READ);
1128 return _bt_pagedel(rel, buf, vacuum_full) + 1;
1130 if (parent_one_child && rightsib_empty)
1132 buf = _bt_getbuf(rel, rightsib, BT_READ);
1133 return _bt_pagedel(rel, buf, vacuum_full) + 1;