/*-------------------------------------------------------------------------
 *
 * nodeBitmapHeapscan.c
 *	  Routines to support bitmapped scans of relations
 *
 * NOTE: it is critical that this plan type only be used with MVCC-compliant
 * snapshots (ie, regular snapshots, not SnapshotNow or one of the other
 * special snapshots).	The reason is that since index and heap scans are
 * decoupled, there can be no assurance that the index tuple prompting a
 * visit to a particular heap TID still exists when the visit is made.
 * Therefore the tuple might not exist anymore either (which is OK because
 * heap_fetch will cope) --- but worse, the tuple slot could have been
 * re-used for a newer tuple.  With an MVCC snapshot the newer tuple is
 * certain to fail the time qual and so it will not be mistakenly returned.
 * With SnapshotNow we might return a tuple that doesn't meet the required
 * index qual conditions.
 *
 * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  src/backend/executor/nodeBitmapHeapscan.c
 *
 *-------------------------------------------------------------------------
 */
/*
 * INTERFACE ROUTINES
 *		ExecBitmapHeapScan			scans a relation using bitmap info
 *		ExecBitmapHeapNext			workhorse for above
 *		ExecInitBitmapHeapScan		creates and initializes state info.
 *		ExecReScanBitmapHeapScan	prepares to rescan the plan.
 *		ExecEndBitmapHeapScan		releases all storage.
 */
38 #include "access/heapam.h"
39 #include "access/relscan.h"
40 #include "access/transam.h"
41 #include "executor/execdebug.h"
42 #include "executor/nodeBitmapHeapscan.h"
44 #include "storage/bufmgr.h"
45 #include "storage/predicate.h"
46 #include "utils/memutils.h"
47 #include "utils/snapmgr.h"
48 #include "utils/tqual.h"
51 static TupleTableSlot *BitmapHeapNext(BitmapHeapScanState *node);
52 static void bitgetpage(HeapScanDesc scan, TBMIterateResult *tbmres);
55 /* ----------------------------------------------------------------
58 * Retrieve next tuple from the BitmapHeapScan node's currentRelation
59 * ----------------------------------------------------------------
61 static TupleTableSlot *
62 BitmapHeapNext(BitmapHeapScanState *node)
64 ExprContext *econtext;
67 TBMIterator *tbmiterator;
68 TBMIterateResult *tbmres;
69 TBMIterator *prefetch_iterator;
70 OffsetNumber targoffset;
74 * extract necessary information from index scan node
76 econtext = node->ss.ps.ps_ExprContext;
77 slot = node->ss.ss_ScanTupleSlot;
78 scan = node->ss.ss_currentScanDesc;
80 tbmiterator = node->tbmiterator;
81 tbmres = node->tbmres;
82 prefetch_iterator = node->prefetch_iterator;
85 * If we haven't yet performed the underlying index scan, do it, and begin
86 * the iteration over the bitmap.
88 * For prefetching, we use *two* iterators, one for the pages we are
89 * actually scanning and another that runs ahead of the first for
90 * prefetching. node->prefetch_pages tracks exactly how many pages ahead
91 * the prefetch iterator is. Also, node->prefetch_target tracks the
92 * desired prefetch distance, which starts small and increases up to the
93 * GUC-controlled maximum, target_prefetch_pages. This is to avoid doing
94 * a lot of prefetching in a scan that stops after a few tuples because of
99 tbm = (TIDBitmap *) MultiExecProcNode(outerPlanState(node));
101 if (!tbm || !IsA(tbm, TIDBitmap))
102 elog(ERROR, "unrecognized result from subplan");
105 node->tbmiterator = tbmiterator = tbm_begin_iterate(tbm);
106 node->tbmres = tbmres = NULL;
109 if (target_prefetch_pages > 0)
111 node->prefetch_iterator = prefetch_iterator = tbm_begin_iterate(tbm);
112 node->prefetch_pages = 0;
113 node->prefetch_target = -1;
115 #endif /* USE_PREFETCH */
124 * Get next page of results if needed
128 node->tbmres = tbmres = tbm_iterate(tbmiterator);
131 /* no more entries in the bitmap */
136 if (node->prefetch_pages > 0)
138 /* The main iterator has closed the distance by one page */
139 node->prefetch_pages--;
141 else if (prefetch_iterator)
143 /* Do not let the prefetch iterator get behind the main one */
144 TBMIterateResult *tbmpre = tbm_iterate(prefetch_iterator);
146 if (tbmpre == NULL || tbmpre->blockno != tbmres->blockno)
147 elog(ERROR, "prefetch and main iterators are out of sync");
149 #endif /* USE_PREFETCH */
152 * Ignore any claimed entries past what we think is the end of the
153 * relation. (This is probably not necessary given that we got at
154 * least AccessShareLock on the table before performing any of the
155 * indexscans, but let's be safe.)
157 if (tbmres->blockno >= scan->rs_nblocks)
159 node->tbmres = tbmres = NULL;
164 * Fetch the current heap page and identify candidate tuples.
166 bitgetpage(scan, tbmres);
169 * Set rs_cindex to first slot to examine
176 * Increase prefetch target if it's not yet at the max. Note that
177 * we will increase it to zero after fetching the very first
178 * page/tuple, then to one after the second tuple is fetched, then
179 * it doubles as later pages are fetched.
181 if (node->prefetch_target >= target_prefetch_pages)
182 /* don't increase any further */ ;
183 else if (node->prefetch_target >= target_prefetch_pages / 2)
184 node->prefetch_target = target_prefetch_pages;
185 else if (node->prefetch_target > 0)
186 node->prefetch_target *= 2;
188 node->prefetch_target++;
189 #endif /* USE_PREFETCH */
194 * Continuing in previously obtained page; advance rs_cindex
201 * Try to prefetch at least a few pages even before we get to the
202 * second page if we don't stop reading after the first tuple.
204 if (node->prefetch_target < target_prefetch_pages)
205 node->prefetch_target++;
206 #endif /* USE_PREFETCH */
210 * Out of range? If so, nothing more to look at on this page
212 if (scan->rs_cindex < 0 || scan->rs_cindex >= scan->rs_ntuples)
214 node->tbmres = tbmres = NULL;
221 * We issue prefetch requests *after* fetching the current page to try
222 * to avoid having prefetching interfere with the main I/O. Also, this
223 * should happen only when we have determined there is still something
224 * to do on the current page, else we may uselessly prefetch the same
225 * page we are just about to request for real.
227 if (prefetch_iterator)
229 while (node->prefetch_pages < node->prefetch_target)
231 TBMIterateResult *tbmpre = tbm_iterate(prefetch_iterator);
235 /* No more pages to prefetch */
236 tbm_end_iterate(prefetch_iterator);
237 node->prefetch_iterator = prefetch_iterator = NULL;
240 node->prefetch_pages++;
241 PrefetchBuffer(scan->rs_rd, MAIN_FORKNUM, tbmpre->blockno);
244 #endif /* USE_PREFETCH */
247 * Okay to fetch the tuple
249 targoffset = scan->rs_vistuples[scan->rs_cindex];
250 dp = (Page) BufferGetPage(scan->rs_cbuf);
251 lp = PageGetItemId(dp, targoffset);
252 Assert(ItemIdIsNormal(lp));
254 scan->rs_ctup.t_data = (HeapTupleHeader) PageGetItem((Page) dp, lp);
255 scan->rs_ctup.t_len = ItemIdGetLength(lp);
256 ItemPointerSet(&scan->rs_ctup.t_self, tbmres->blockno, targoffset);
258 pgstat_count_heap_fetch(scan->rs_rd);
261 * Set up the result slot to point to this tuple. Note that the slot
262 * acquires a pin on the buffer.
264 ExecStoreTuple(&scan->rs_ctup,
270 * If we are using lossy info, we have to recheck the qual conditions
275 econtext->ecxt_scantuple = slot;
276 ResetExprContext(econtext);
278 if (!ExecQual(node->bitmapqualorig, econtext, false))
280 /* Fails recheck, so drop it and loop back for another */
281 ExecClearTuple(slot);
286 /* OK to return this tuple */
291 * if we get here it means we are at the end of the scan..
293 return ExecClearTuple(slot);
297 * bitgetpage - subroutine for BitmapHeapNext()
299 * This routine reads and pins the specified page of the relation, then
300 * builds an array indicating which tuples on the page are both potentially
301 * interesting according to the bitmap, and visible according to the snapshot.
304 bitgetpage(HeapScanDesc scan, TBMIterateResult *tbmres)
306 BlockNumber page = tbmres->blockno;
312 * Acquire pin on the target heap page, trading in any pin we held before.
314 Assert(page < scan->rs_nblocks);
316 scan->rs_cbuf = ReleaseAndReadBuffer(scan->rs_cbuf,
319 buffer = scan->rs_cbuf;
320 snapshot = scan->rs_snapshot;
325 * Prune and repair fragmentation for the whole page, if possible.
327 Assert(TransactionIdIsValid(RecentGlobalXmin));
328 heap_page_prune_opt(scan->rs_rd, buffer, RecentGlobalXmin);
331 * We must hold share lock on the buffer content while examining tuple
332 * visibility. Afterwards, however, the tuples we have found to be
333 * visible are guaranteed good as long as we hold the buffer pin.
335 LockBuffer(buffer, BUFFER_LOCK_SHARE);
338 * We need two separate strategies for lossy and non-lossy cases.
340 if (tbmres->ntuples >= 0)
343 * Bitmap is non-lossy, so we just look through the offsets listed in
344 * tbmres; but we have to follow any HOT chain starting at each such
349 for (curslot = 0; curslot < tbmres->ntuples; curslot++)
351 OffsetNumber offnum = tbmres->offsets[curslot];
353 HeapTupleData heapTuple;
355 ItemPointerSet(&tid, page, offnum);
356 if (heap_hot_search_buffer(&tid, scan->rs_rd, buffer, snapshot,
357 &heapTuple, NULL, true))
358 scan->rs_vistuples[ntup++] = ItemPointerGetOffsetNumber(&tid);
364 * Bitmap is lossy, so we must examine each item pointer on the page.
365 * But we can ignore HOT chains, since we'll check each tuple anyway.
367 Page dp = (Page) BufferGetPage(buffer);
368 OffsetNumber maxoff = PageGetMaxOffsetNumber(dp);
371 for (offnum = FirstOffsetNumber; offnum <= maxoff; offnum = OffsetNumberNext(offnum))
374 HeapTupleData loctup;
377 lp = PageGetItemId(dp, offnum);
378 if (!ItemIdIsNormal(lp))
380 loctup.t_data = (HeapTupleHeader) PageGetItem((Page) dp, lp);
381 loctup.t_len = ItemIdGetLength(lp);
382 loctup.t_tableOid = scan->rs_rd->rd_id;
383 ItemPointerSet(&loctup.t_self, page, offnum);
384 valid = HeapTupleSatisfiesVisibility(&loctup, snapshot, buffer);
387 scan->rs_vistuples[ntup++] = offnum;
388 PredicateLockTuple(scan->rs_rd, &loctup, snapshot);
390 CheckForSerializableConflictOut(valid, scan->rs_rd, &loctup,
395 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
397 Assert(ntup <= MaxHeapTuplesPerPage);
398 scan->rs_ntuples = ntup;
402 * BitmapHeapRecheck -- access method routine to recheck a tuple in EvalPlanQual
405 BitmapHeapRecheck(BitmapHeapScanState *node, TupleTableSlot *slot)
407 ExprContext *econtext;
410 * extract necessary information from index scan node
412 econtext = node->ss.ps.ps_ExprContext;
414 /* Does the tuple meet the original qual conditions? */
415 econtext->ecxt_scantuple = slot;
417 ResetExprContext(econtext);
419 return ExecQual(node->bitmapqualorig, econtext, false);
422 /* ----------------------------------------------------------------
423 * ExecBitmapHeapScan(node)
424 * ----------------------------------------------------------------
427 ExecBitmapHeapScan(BitmapHeapScanState *node)
429 return ExecScan(&node->ss,
430 (ExecScanAccessMtd) BitmapHeapNext,
431 (ExecScanRecheckMtd) BitmapHeapRecheck);
434 /* ----------------------------------------------------------------
435 * ExecReScanBitmapHeapScan(node)
436 * ----------------------------------------------------------------
439 ExecReScanBitmapHeapScan(BitmapHeapScanState *node)
441 /* rescan to release any page pin */
442 heap_rescan(node->ss.ss_currentScanDesc, NULL);
444 if (node->tbmiterator)
445 tbm_end_iterate(node->tbmiterator);
446 if (node->prefetch_iterator)
447 tbm_end_iterate(node->prefetch_iterator);
451 node->tbmiterator = NULL;
453 node->prefetch_iterator = NULL;
455 ExecScanReScan(&node->ss);
458 * if chgParam of subnode is not null then plan will be re-scanned by
459 * first ExecProcNode.
461 if (node->ss.ps.lefttree->chgParam == NULL)
462 ExecReScan(node->ss.ps.lefttree);
465 /* ----------------------------------------------------------------
466 * ExecEndBitmapHeapScan
467 * ----------------------------------------------------------------
470 ExecEndBitmapHeapScan(BitmapHeapScanState *node)
473 HeapScanDesc scanDesc;
476 * extract information from the node
478 relation = node->ss.ss_currentRelation;
479 scanDesc = node->ss.ss_currentScanDesc;
482 * Free the exprcontext
484 ExecFreeExprContext(&node->ss.ps);
487 * clear out tuple table slots
489 ExecClearTuple(node->ss.ps.ps_ResultTupleSlot);
490 ExecClearTuple(node->ss.ss_ScanTupleSlot);
493 * close down subplans
495 ExecEndNode(outerPlanState(node));
498 * release bitmap if any
500 if (node->tbmiterator)
501 tbm_end_iterate(node->tbmiterator);
502 if (node->prefetch_iterator)
503 tbm_end_iterate(node->prefetch_iterator);
510 heap_endscan(scanDesc);
513 * close the heap relation.
515 ExecCloseScanRelation(relation);
518 /* ----------------------------------------------------------------
519 * ExecInitBitmapHeapScan
521 * Initializes the scan's state information.
522 * ----------------------------------------------------------------
524 BitmapHeapScanState *
525 ExecInitBitmapHeapScan(BitmapHeapScan *node, EState *estate, int eflags)
527 BitmapHeapScanState *scanstate;
528 Relation currentRelation;
530 /* check for unsupported flags */
531 Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
534 * Assert caller didn't ask for an unsafe snapshot --- see comments at
537 Assert(IsMVCCSnapshot(estate->es_snapshot));
540 * create state structure
542 scanstate = makeNode(BitmapHeapScanState);
543 scanstate->ss.ps.plan = (Plan *) node;
544 scanstate->ss.ps.state = estate;
546 scanstate->tbm = NULL;
547 scanstate->tbmiterator = NULL;
548 scanstate->tbmres = NULL;
549 scanstate->prefetch_iterator = NULL;
550 scanstate->prefetch_pages = 0;
551 scanstate->prefetch_target = 0;
554 * Miscellaneous initialization
556 * create expression context for node
558 ExecAssignExprContext(estate, &scanstate->ss.ps);
560 scanstate->ss.ps.ps_TupFromTlist = false;
563 * initialize child expressions
565 scanstate->ss.ps.targetlist = (List *)
566 ExecInitExpr((Expr *) node->scan.plan.targetlist,
567 (PlanState *) scanstate);
568 scanstate->ss.ps.qual = (List *)
569 ExecInitExpr((Expr *) node->scan.plan.qual,
570 (PlanState *) scanstate);
571 scanstate->bitmapqualorig = (List *)
572 ExecInitExpr((Expr *) node->bitmapqualorig,
573 (PlanState *) scanstate);
576 * tuple table initialization
578 ExecInitResultTupleSlot(estate, &scanstate->ss.ps);
579 ExecInitScanTupleSlot(estate, &scanstate->ss);
582 * open the base relation and acquire appropriate lock on it.
584 currentRelation = ExecOpenScanRelation(estate, node->scan.scanrelid);
586 scanstate->ss.ss_currentRelation = currentRelation;
589 * Even though we aren't going to do a conventional seqscan, it is useful
590 * to create a HeapScanDesc --- most of the fields in it are usable.
592 scanstate->ss.ss_currentScanDesc = heap_beginscan_bm(currentRelation,
598 * get the scan type from the relation descriptor.
600 ExecAssignScanType(&scanstate->ss, RelationGetDescr(currentRelation));
603 * Initialize result tuple type and projection info.
605 ExecAssignResultTypeFromTL(&scanstate->ss.ps);
606 ExecAssignScanProjectionInfo(&scanstate->ss);
609 * initialize child nodes
611 * We do this last because the child nodes will open indexscans on our
612 * relation's indexes, and we want to be sure we have acquired a lock on
613 * the relation first.
615 outerPlanState(scanstate) = ExecInitNode(outerPlan(node), estate, eflags);