/*-------------------------------------------------------------------------
 *
 * hio.c
 *	  POSTGRES heap access method input/output code.
 *
 * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  src/backend/access/heap/hio.c
 *
 *-------------------------------------------------------------------------
 */

#include "postgres.h"

#include "access/heapam.h"
#include "access/hio.h"
#include "access/visibilitymap.h"
#include "storage/bufmgr.h"
#include "storage/freespace.h"
#include "storage/lmgr.h"
#include "storage/smgr.h"

/*
 * RelationPutHeapTuple - place tuple at specified page
 *
 * !!! EREPORT(ERROR) IS DISALLOWED HERE !!!  Must PANIC on failure!!!
 *
 * Note - caller must hold BUFFER_LOCK_EXCLUSIVE on the buffer.
 */
void
RelationPutHeapTuple(Relation relation,
					 Buffer buffer,
					 HeapTuple tuple)
{
	Page		pageHeader;
	OffsetNumber offnum;
	ItemId		itemId;
	Item		item;

	/* Add the tuple to the page */
	pageHeader = BufferGetPage(buffer);

	offnum = PageAddItem(pageHeader, (Item) tuple->t_data,
						 tuple->t_len, InvalidOffsetNumber, false, true);

	if (offnum == InvalidOffsetNumber)
		elog(PANIC, "failed to add tuple to page");

	/* Update tuple->t_self to the actual position where it was stored */
	ItemPointerSet(&(tuple->t_self), BufferGetBlockNumber(buffer), offnum);

	/* Insert the correct position into CTID of the stored tuple, too */
	itemId = PageGetItemId(pageHeader, offnum);
	item = PageGetItem(pageHeader, itemId);
	((HeapTupleHeader) item)->t_ctid = tuple->t_self;
}

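/*
 * Illustrative sketch (not part of the original file): the typical calling
 * sequence, loosely modeled on heap_insert().  RelationGetBufferForTuple
 * (below) returns the buffer pinned and exclusive-locked, satisfying the
 * locking requirement noted above.  WAL logging, visibility-map clearing,
 * and transaction bookkeeping are omitted.  Guarded by a hypothetical
 * HIO_USAGE_EXAMPLE macro so it does not affect compilation.
 */
#ifdef HIO_USAGE_EXAMPLE
static void
example_simple_insert(Relation relation, HeapTuple tup)
{
	Buffer		buffer;
	Buffer		vmbuffer = InvalidBuffer;

	/* find (or create) a page with room, returned exclusive-locked */
	buffer = RelationGetBufferForTuple(relation, tup->t_len,
									   InvalidBuffer, 0, NULL,
									   &vmbuffer);

	RelationPutHeapTuple(relation, buffer, tup);

	/* caller is responsible for dropping the lock and pins */
	UnlockReleaseBuffer(buffer);
	if (vmbuffer != InvalidBuffer)
		ReleaseBuffer(vmbuffer);
}
#endif
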
/*
 * Read in a buffer, using bulk-insert strategy if bistate isn't NULL.
 */
static Buffer
ReadBufferBI(Relation relation, BlockNumber targetBlock,
			 BulkInsertState bistate)
{
	Buffer		buffer;

	/* If not bulk-insert, exactly like ReadBuffer */
	if (!bistate)
		return ReadBuffer(relation, targetBlock);

	/* If we have the desired block already pinned, re-pin and return it */
	if (bistate->current_buf != InvalidBuffer)
	{
		if (BufferGetBlockNumber(bistate->current_buf) == targetBlock)
		{
			IncrBufferRefCount(bistate->current_buf);
			return bistate->current_buf;
		}
		/* ... else drop the old buffer */
		ReleaseBuffer(bistate->current_buf);
		bistate->current_buf = InvalidBuffer;
	}

	/* Perform a read using the buffer strategy */
	buffer = ReadBufferExtended(relation, MAIN_FORKNUM, targetBlock,
								RBM_NORMAL, bistate->strategy);

	/* Save the selected block as target for future inserts */
	IncrBufferRefCount(buffer);
	bistate->current_buf = buffer;

	return buffer;
}

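/*
 * Illustrative sketch (not part of the original file): how a bulk loader
 * such as COPY drives this machinery.  GetBulkInsertState() supplies the
 * BULKWRITE strategy and the pinned current_buf that ReadBufferBI reuses
 * across calls, avoiding repeated pin/unpin cycles on the insertion target
 * page.  Guarded by a hypothetical HIO_USAGE_EXAMPLE macro.
 */
#ifdef HIO_USAGE_EXAMPLE
static void
example_bulk_load(Relation relation, HeapTuple *tuples, int ntuples,
				  CommandId cid)
{
	BulkInsertState bistate = GetBulkInsertState();
	int			i;

	for (i = 0; i < ntuples; i++)
		heap_insert(relation, tuples[i], cid, 0, bistate);

	FreeBulkInsertState(bistate);	/* drops the pin held on current_buf */
}
#endif
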
/*
 * RelationGetBufferForTuple
 *
 *	Returns pinned and exclusive-locked buffer of a page in given relation
 *	with free space >= given len.
 *
 *	If otherBuffer is not InvalidBuffer, then it references a previously
 *	pinned buffer of another page in the same relation; on return, this
 *	buffer will also be exclusive-locked.  (This case is used by heap_update;
 *	the otherBuffer contains the tuple being updated.)
 *
 *	The reason for passing otherBuffer is that if two backends are doing
 *	concurrent heap_update operations, a deadlock could occur if they try
 *	to lock the same two buffers in opposite orders.  To ensure that this
 *	can't happen, we impose the rule that buffers of a relation must be
 *	locked in increasing page number order.  This is most conveniently done
 *	by having RelationGetBufferForTuple lock them both, with suitable care
 *	for ordering.
 *
 *	NOTE: it is unlikely, but not quite impossible, for otherBuffer to be the
 *	same buffer we select for insertion of the new tuple (this could only
 *	happen if space is freed in that page after heap_update finds there's not
 *	enough there).  In that case, the page will be pinned and locked only once.
 *
 *	We normally use FSM to help us find free space.  However,
 *	if HEAP_INSERT_SKIP_FSM is specified, we just append a new empty page to
 *	the end of the relation if the tuple won't fit on the current target page.
 *	This can save some cycles when we know the relation is new and doesn't
 *	contain useful amounts of free space.
 *
 *	HEAP_INSERT_SKIP_FSM is also useful for non-WAL-logged additions to a
 *	relation, if the caller holds exclusive lock and is careful to invalidate
 *	relation's smgr_targblock before the first insertion --- that ensures that
 *	all insertions will occur into newly added pages and not be intermixed
 *	with tuples from other transactions.  That way, a crash can't risk losing
 *	any committed data of other transactions.  (See heap_insert's comments
 *	for additional constraints needed for safe usage of this behavior.)
 *
 *	The caller can also provide a BulkInsertState object to optimize many
 *	insertions into the same relation.  This keeps a pin on the current
 *	insertion target page (to save pin/unpin cycles) and also passes a
 *	BULKWRITE buffer selection strategy object to the buffer manager.
 *	Passing NULL for bistate selects the default behavior.
 *
 *	We always try to avoid filling existing pages further than the fillfactor.
 *	This is OK since this routine is not consulted when updating a tuple and
 *	keeping it on the same page, which is the scenario fillfactor is meant
 *	to reserve space for.
 *
 *	ereport(ERROR) is allowed here, so this routine *must* be called
 *	before any (unlogged) changes are made in the buffer pool.
 */
Buffer
RelationGetBufferForTuple(Relation relation, Size len,
						  Buffer otherBuffer, int options,
						  struct BulkInsertStateData * bistate,
						  Buffer *vmbuffer)
{
	bool		use_fsm = !(options & HEAP_INSERT_SKIP_FSM);
	Buffer		buffer = InvalidBuffer;
	Page		page;
	Size		pageFreeSpace,
				saveFreeSpace;
	BlockNumber targetBlock,
				otherBlock;
	bool		needLock;

	len = MAXALIGN(len);		/* be conservative */

	/* Bulk insert is not supported for updates, only inserts. */
	Assert(otherBuffer == InvalidBuffer || !bistate);

	/*
	 * If we're going to fail for an oversize tuple, do it right away.
	 */
	if (len > MaxHeapTupleSize)
		ereport(ERROR,
				(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
				 errmsg("row is too big: size %lu, maximum size %lu",
						(unsigned long) len,
						(unsigned long) MaxHeapTupleSize)));

	/* Compute desired extra freespace due to fillfactor option */
	saveFreeSpace = RelationGetTargetPageFreeSpace(relation,
												   HEAP_DEFAULT_FILLFACTOR);

	if (otherBuffer != InvalidBuffer)
		otherBlock = BufferGetBlockNumber(otherBuffer);
	else
		otherBlock = InvalidBlockNumber;	/* just to keep compiler quiet */

	/*
	 * We first try to put the tuple on the same page we last inserted a tuple
	 * on, as cached in the BulkInsertState or relcache entry.  If that
	 * doesn't work, we ask the Free Space Map to locate a suitable page.
	 * Since the FSM's info might be out of date, we have to be prepared to
	 * loop around and retry multiple times.  (To ensure this isn't an
	 * infinite loop, we must update the FSM with the correct amount of free
	 * space on each page that proves not to be suitable.)  If the FSM has no
	 * record of a page with enough free space, we give up and extend the
	 * relation.
	 *
	 * When use_fsm is false, we either put the tuple onto the existing target
	 * page or extend the relation.
	 */
	if (len + saveFreeSpace > MaxHeapTupleSize)
	{
		/* can't fit, don't bother asking FSM */
		targetBlock = InvalidBlockNumber;
		use_fsm = false;
	}
	else if (bistate && bistate->current_buf != InvalidBuffer)
		targetBlock = BufferGetBlockNumber(bistate->current_buf);
	else
		targetBlock = RelationGetTargetBlock(relation);

	if (targetBlock == InvalidBlockNumber && use_fsm)
	{
		/*
		 * We have no cached target page, so ask the FSM for an initial
		 * target.
		 */
		targetBlock = GetPageWithFreeSpace(relation, len + saveFreeSpace);

		/*
		 * If the FSM knows nothing of the rel, try the last page before we
		 * give up and extend.  This avoids one-tuple-per-page syndrome during
		 * bootstrapping or in a recently-started system.
		 */
		if (targetBlock == InvalidBlockNumber)
		{
			BlockNumber nblocks = RelationGetNumberOfBlocks(relation);

			if (nblocks > 0)
				targetBlock = nblocks - 1;
		}
	}

	while (targetBlock != InvalidBlockNumber)
	{
		/*
		 * Read and exclusive-lock the target block, as well as the other
		 * block if one was given, taking suitable care with lock ordering and
		 * the possibility they are the same block.
		 *
		 * If the page-level all-visible flag is set, caller will need to
		 * clear both that and the corresponding visibility map bit.  However,
		 * by the time we return, we'll have x-locked the buffer, and we don't
		 * want to do any I/O while in that state.  So we check the bit here
		 * before taking the lock, and pin the page if it appears necessary.
		 * Checking without the lock creates a risk of getting the wrong
		 * answer, so we'll have to recheck after acquiring the lock.
		 */
		if (otherBuffer == InvalidBuffer)
		{
			/* easy case */
			buffer = ReadBufferBI(relation, targetBlock, bistate);
			if (PageIsAllVisible(BufferGetPage(buffer)))
				visibilitymap_pin(relation, targetBlock, vmbuffer);
			LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
		}
		else if (otherBlock == targetBlock)
		{
			/* also easy case */
			buffer = otherBuffer;
			if (PageIsAllVisible(BufferGetPage(buffer)))
				visibilitymap_pin(relation, targetBlock, vmbuffer);
			LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
		}
		else if (otherBlock < targetBlock)
		{
			/* lock other buffer first */
			buffer = ReadBuffer(relation, targetBlock);
			if (PageIsAllVisible(BufferGetPage(buffer)))
				visibilitymap_pin(relation, targetBlock, vmbuffer);
			LockBuffer(otherBuffer, BUFFER_LOCK_EXCLUSIVE);
			LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
		}
		else
		{
			/* lock target buffer first */
			buffer = ReadBuffer(relation, targetBlock);
			if (PageIsAllVisible(BufferGetPage(buffer)))
				visibilitymap_pin(relation, targetBlock, vmbuffer);
			LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
			LockBuffer(otherBuffer, BUFFER_LOCK_EXCLUSIVE);
		}

		/*
		 * If the page is all visible but we don't have the right visibility
		 * map page pinned, then give up our locks, go get the pin, and
		 * re-lock.  This is pretty painful, but hopefully shouldn't happen
		 * often.  Note that there's a small possibility that we didn't pin
		 * the page above but still have the correct page pinned anyway,
		 * either because we've already made a previous pass through this
		 * loop, or because caller passed us the right page anyway.
		 *
		 * Note also that it's possible that by the time we get the pin and
		 * retake the buffer locks, the visibility map bit will have been
		 * cleared by some other backend anyway.  In that case, we'll have
		 * done a bit of extra work for no gain, but there's no real harm
		 * done.
		 */
		if (PageIsAllVisible(BufferGetPage(buffer))
			&& !visibilitymap_pin_ok(targetBlock, *vmbuffer))
		{
			LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
			if (otherBlock != targetBlock)
				LockBuffer(otherBuffer, BUFFER_LOCK_UNLOCK);
			visibilitymap_pin(relation, targetBlock, vmbuffer);
			if (otherBuffer != InvalidBuffer && otherBlock < targetBlock)
				LockBuffer(otherBuffer, BUFFER_LOCK_EXCLUSIVE);
			LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
			if (otherBuffer != InvalidBuffer && otherBlock > targetBlock)
				LockBuffer(otherBuffer, BUFFER_LOCK_EXCLUSIVE);
		}

		/*
		 * Now we can check to see if there's enough free space here.  If so,
		 * we're done.
		 */
		page = BufferGetPage(buffer);
		pageFreeSpace = PageGetHeapFreeSpace(page);
		if (len + saveFreeSpace <= pageFreeSpace)
		{
			/* use this page as future insert target, too */
			RelationSetTargetBlock(relation, targetBlock);
			return buffer;
		}

		/*
		 * Not enough space, so we must give up our page locks and pin (if
		 * any) and prepare to look elsewhere.  We don't care which order we
		 * unlock the two buffers in, so this can be slightly simpler than the
		 * code above.
		 */
		LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
		if (otherBuffer == InvalidBuffer)
			ReleaseBuffer(buffer);
		else if (otherBlock != targetBlock)
		{
			LockBuffer(otherBuffer, BUFFER_LOCK_UNLOCK);
			ReleaseBuffer(buffer);
		}

		/* Without FSM, always fall out of the loop and extend */
		if (!use_fsm)
			break;

		/*
		 * Update FSM as to condition of this page, and ask for another page
		 * to try.
		 */
		targetBlock = RecordAndGetPageWithFreeSpace(relation,
													targetBlock,
													pageFreeSpace,
													len + saveFreeSpace);
	}

	/*
	 * Have to extend the relation.
	 *
	 * We have to use a lock to ensure no one else is extending the rel at the
	 * same time, else we will both try to initialize the same new page.  We
	 * can skip locking for new or temp relations, however, since no one else
	 * could be accessing them.
	 */
	needLock = !RELATION_IS_LOCAL(relation);

	if (needLock)
		LockRelationForExtension(relation, ExclusiveLock);

	/*
	 * XXX This does an lseek - rather expensive - but at the moment it is the
	 * only way to accurately determine how many blocks are in a relation.  Is
	 * it worth keeping an accurate file length in shared memory someplace,
	 * rather than relying on the kernel to do it for us?
	 */
	buffer = ReadBufferBI(relation, P_NEW, bistate);

	/*
	 * We can be certain that locking the otherBuffer first is OK, since it
	 * must have a lower page number.
	 */
	if (otherBuffer != InvalidBuffer)
		LockBuffer(otherBuffer, BUFFER_LOCK_EXCLUSIVE);

	/*
	 * Now acquire lock on the new page.
	 */
	LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);

	/*
	 * Release the file-extension lock; it's now OK for someone else to extend
	 * the relation some more.  Note that we cannot release this lock before
	 * we have buffer lock on the new page, or we risk a race condition
	 * against vacuumlazy.c --- see comments therein.
	 */
	if (needLock)
		UnlockRelationForExtension(relation, ExclusiveLock);

	/*
	 * We need to initialize the empty new page.  Double-check that it really
	 * is empty (this should never happen, but if it does we don't want to
	 * risk wiping out valid data).
	 */
	page = BufferGetPage(buffer);

	if (!PageIsNew(page))
		elog(ERROR, "page %u of relation \"%s\" should be empty but is not",
			 BufferGetBlockNumber(buffer),
			 RelationGetRelationName(relation));

	PageInit(page, BufferGetPageSize(buffer), 0);

	if (len > PageGetHeapFreeSpace(page))
	{
		/* We should not get here given the test at the top */
		elog(PANIC, "tuple is too big: size %lu", (unsigned long) len);
	}

	/*
	 * Remember the new page as our target for future insertions.
	 *
	 * XXX should we enter the new page into the free space map immediately,
	 * or just keep it for this backend's exclusive use in the short run
	 * (until VACUUM sees it)?  Seems to depend on whether you expect the
	 * current backend to make more insertions or not, which is probably a
	 * good bet most of the time.  So for now, don't add it to FSM yet.
	 */
	RelationSetTargetBlock(relation, BufferGetBlockNumber(buffer));

	return buffer;
}
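
/*
 * Illustrative sketch (not part of the original file): the heap_update-style
 * case that motivates the otherBuffer argument above.  On entry the buffer
 * holding the old tuple is pinned but not locked; RelationGetBufferForTuple
 * returns with both buffers exclusive-locked, having taken them in
 * increasing block number order to avoid deadlock.  WAL logging, tuple
 * header updates, and visibility handling are omitted.  Guarded by a
 * hypothetical HIO_USAGE_EXAMPLE macro.
 */
#ifdef HIO_USAGE_EXAMPLE
static void
example_update_placement(Relation relation, Buffer oldbuf, HeapTuple newtup)
{
	Buffer		newbuf;
	Buffer		vmbuffer = InvalidBuffer;

	/* both oldbuf and newbuf are exclusive-locked on return */
	newbuf = RelationGetBufferForTuple(relation, newtup->t_len,
									   oldbuf, 0, NULL,
									   &vmbuffer);

	RelationPutHeapTuple(relation, newbuf, newtup);

	/* if newbuf == oldbuf, the page was pinned and locked only once */
	LockBuffer(newbuf, BUFFER_LOCK_UNLOCK);
	if (newbuf != oldbuf)
	{
		LockBuffer(oldbuf, BUFFER_LOCK_UNLOCK);
		ReleaseBuffer(newbuf);	/* oldbuf's pin stays with the caller */
	}
	if (vmbuffer != InvalidBuffer)
		ReleaseBuffer(vmbuffer);
}
#endif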