/*-------------------------------------------------------------------------
 *
 * hio.c
 *    POSTGRES heap access method input/output code.
 *
 * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *    src/backend/access/heap/hio.c
 *
 *-------------------------------------------------------------------------
 */

#include "postgres.h"

#include "access/heapam.h"
#include "access/hio.h"
#include "storage/bufmgr.h"
#include "storage/freespace.h"
#include "storage/lmgr.h"
#include "storage/smgr.h"


/*
 * RelationPutHeapTuple - place tuple at specified page
 *
 * !!! EREPORT(ERROR) IS DISALLOWED HERE !!!  Must PANIC on failure!!!
 *
 * Note - caller must hold BUFFER_LOCK_EXCLUSIVE on the buffer.
 */
void
RelationPutHeapTuple(Relation relation,
                     Buffer buffer,
                     HeapTuple tuple)
{
    Page        pageHeader;
    OffsetNumber offnum;
    ItemId      itemId;
    Item        item;

    /* Add the tuple to the page */
    pageHeader = BufferGetPage(buffer);

    offnum = PageAddItem(pageHeader, (Item) tuple->t_data,
                         tuple->t_len, InvalidOffsetNumber, false, true);

    if (offnum == InvalidOffsetNumber)
        elog(PANIC, "failed to add tuple to page");

    /* Update tuple->t_self to the actual position where it was stored */
    ItemPointerSet(&(tuple->t_self), BufferGetBlockNumber(buffer), offnum);

    /* Insert the correct position into CTID of the stored tuple, too */
    itemId = PageGetItemId(pageHeader, offnum);
    item = PageGetItem(pageHeader, itemId);
    ((HeapTupleHeader) item)->t_ctid = tuple->t_self;
}
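
/*
 * A minimal usage sketch of the calling pattern around RelationPutHeapTuple,
 * modeled loosely on heap_insert.  The function name example_put_tuple is
 * hypothetical, and real callers additionally enter a critical section and
 * write WAL between placing the tuple and releasing the lock; that is
 * omitted here.  The prototypes come from access/hio.h and storage/bufmgr.h,
 * both already included above.
 */
#ifdef HIO_USAGE_SKETCH
static void
example_put_tuple(Relation relation, HeapTuple tup)
{
    /* Get a pinned, exclusive-locked page with room for the tuple */
    Buffer      buffer = RelationGetBufferForTuple(relation, tup->t_len,
                                                   InvalidBuffer, 0, NULL);

    /* Place the tuple; this PANICs (never ERRORs) on failure */
    RelationPutHeapTuple(relation, buffer, tup);

    MarkBufferDirty(buffer);
    UnlockReleaseBuffer(buffer);
}
#endif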

/*
 * Read in a buffer, using bulk-insert strategy if bistate isn't NULL.
 */
static Buffer
ReadBufferBI(Relation relation, BlockNumber targetBlock,
             BulkInsertState bistate)
{
    Buffer      buffer;

    /* If not bulk-insert, exactly like ReadBuffer */
    if (!bistate)
        return ReadBuffer(relation, targetBlock);

    /* If we have the desired block already pinned, re-pin and return it */
    if (bistate->current_buf != InvalidBuffer)
    {
        if (BufferGetBlockNumber(bistate->current_buf) == targetBlock)
        {
            IncrBufferRefCount(bistate->current_buf);
            return bistate->current_buf;
        }
        /* ... else drop the old buffer */
        ReleaseBuffer(bistate->current_buf);
        bistate->current_buf = InvalidBuffer;
    }

    /* Perform a read using the buffer strategy */
    buffer = ReadBufferExtended(relation, MAIN_FORKNUM, targetBlock,
                                RBM_NORMAL, bistate->strategy);

    /* Save the selected block as target for future inserts */
    IncrBufferRefCount(buffer);
    bistate->current_buf = buffer;

    return buffer;
}
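
/*
 * A minimal sketch of how a bulk loader (for example, COPY FROM) is expected
 * to exercise the bistate path above.  example_bulk_load, tups, and ntuples
 * are hypothetical names; GetBulkInsertState, heap_insert, and
 * FreeBulkInsertState are the real entry points declared in access/heapam.h.
 */
#ifdef HIO_USAGE_SKETCH
static void
example_bulk_load(Relation relation, HeapTuple *tups, int ntuples,
                  CommandId cid)
{
    BulkInsertState bistate = GetBulkInsertState();
    int         i;

    /* Each insert reuses the pinned target page held in bistate */
    for (i = 0; i < ntuples; i++)
        heap_insert(relation, tups[i], cid, 0, bistate);

    /* Drops the pin kept in bistate->current_buf */
    FreeBulkInsertState(bistate);
}
#endif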

/*
 * RelationGetBufferForTuple
 *
 *    Returns pinned and exclusive-locked buffer of a page in given relation
 *    with free space >= given len.
 *
 *    If otherBuffer is not InvalidBuffer, then it references a previously
 *    pinned buffer of another page in the same relation; on return, this
 *    buffer will also be exclusive-locked.  (This case is used by heap_update;
 *    the otherBuffer contains the tuple being updated.)
 *
 *    The reason for passing otherBuffer is that if two backends are doing
 *    concurrent heap_update operations, a deadlock could occur if they try
 *    to lock the same two buffers in opposite orders.  To ensure that this
 *    can't happen, we impose the rule that buffers of a relation must be
 *    locked in increasing page number order.  This is most conveniently done
 *    by having RelationGetBufferForTuple lock them both, with suitable care
 *    for ordering.
 *
 *    NOTE: it is unlikely, but not quite impossible, for otherBuffer to be the
 *    same buffer we select for insertion of the new tuple (this could only
 *    happen if space is freed in that page after heap_update finds there's not
 *    enough there).  In that case, the page will be pinned and locked only once.
 *
 *    We normally use FSM to help us find free space.  However,
 *    if HEAP_INSERT_SKIP_FSM is specified, we just append a new empty page to
 *    the end of the relation if the tuple won't fit on the current target page.
 *    This can save some cycles when we know the relation is new and doesn't
 *    contain useful amounts of free space.
 *
 *    HEAP_INSERT_SKIP_FSM is also useful for non-WAL-logged additions to a
 *    relation, if the caller holds exclusive lock and is careful to invalidate
 *    relation's smgr_targblock before the first insertion --- that ensures that
 *    all insertions will occur into newly added pages and not be intermixed
 *    with tuples from other transactions.  That way, a crash can't risk losing
 *    any committed data of other transactions.  (See heap_insert's comments
 *    for additional constraints needed for safe usage of this behavior.)
 *
 *    The caller can also provide a BulkInsertState object to optimize many
 *    insertions into the same relation.  This keeps a pin on the current
 *    insertion target page (to save pin/unpin cycles) and also passes a
 *    BULKWRITE buffer selection strategy object to the buffer manager.
 *    Passing NULL for bistate selects the default behavior.
 *
 *    We always try to avoid filling existing pages further than the fillfactor.
 *    This is OK since this routine is not consulted when updating a tuple and
 *    keeping it on the same page, which is the scenario fillfactor is meant
 *    to reserve space for.
 *
 *    ereport(ERROR) is allowed here, so this routine *must* be called
 *    before any (unlogged) changes are made in buffer pool.
 */
Buffer
RelationGetBufferForTuple(Relation relation, Size len,
                          Buffer otherBuffer, int options,
                          struct BulkInsertStateData * bistate)
{
    bool        use_fsm = !(options & HEAP_INSERT_SKIP_FSM);
    Buffer      buffer = InvalidBuffer;
    Page        page;
    Size        pageFreeSpace,
                saveFreeSpace;
    BlockNumber targetBlock,
                otherBlock;
    bool        needLock;

    len = MAXALIGN(len);        /* be conservative */

    /* Bulk insert is not supported for updates, only inserts. */
    Assert(otherBuffer == InvalidBuffer || !bistate);

    /*
     * If we're going to fail for an oversized tuple, do it right away.
     */
    if (len > MaxHeapTupleSize)
        ereport(ERROR,
                (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
                 errmsg("row is too big: size %lu, maximum size %lu",
                        (unsigned long) len,
                        (unsigned long) MaxHeapTupleSize)));

    /* Compute desired extra freespace due to fillfactor option */
    saveFreeSpace = RelationGetTargetPageFreeSpace(relation,
                                                   HEAP_DEFAULT_FILLFACTOR);

    if (otherBuffer != InvalidBuffer)
        otherBlock = BufferGetBlockNumber(otherBuffer);
    else
        otherBlock = InvalidBlockNumber;    /* just to keep compiler quiet */

    /*
     * We first try to put the tuple on the same page we last inserted a tuple
     * on, as cached in the BulkInsertState or relcache entry.  If that
     * doesn't work, we ask the Free Space Map to locate a suitable page.
     * Since the FSM's info might be out of date, we have to be prepared to
     * loop around and retry multiple times. (To ensure this isn't an infinite
     * loop, we must update the FSM with the correct amount of free space on
     * each page that proves not to be suitable.)  If the FSM has no record of
     * a page with enough free space, we give up and extend the relation.
     *
     * When use_fsm is false, we either put the tuple onto the existing target
     * page or extend the relation.
     */
    if (len + saveFreeSpace > MaxHeapTupleSize)
    {
        /* can't fit, don't bother asking FSM */
        targetBlock = InvalidBlockNumber;
        use_fsm = false;
    }
    else if (bistate && bistate->current_buf != InvalidBuffer)
        targetBlock = BufferGetBlockNumber(bistate->current_buf);
    else
        targetBlock = RelationGetTargetBlock(relation);

    if (targetBlock == InvalidBlockNumber && use_fsm)
    {
        /*
         * We have no cached target page, so ask the FSM for an initial
         * target.
         */
        targetBlock = GetPageWithFreeSpace(relation, len + saveFreeSpace);

        /*
         * If the FSM knows nothing of the rel, try the last page before we
         * give up and extend.  This avoids one-tuple-per-page syndrome during
         * bootstrapping or in a recently-started system.
         */
        if (targetBlock == InvalidBlockNumber)
        {
            BlockNumber nblocks = RelationGetNumberOfBlocks(relation);

            if (nblocks > 0)
                targetBlock = nblocks - 1;
        }
    }

    while (targetBlock != InvalidBlockNumber)
    {
        /*
         * Read and exclusive-lock the target block, as well as the other
         * block if one was given, taking suitable care with lock ordering and
         * the possibility they are the same block.
         */
        if (otherBuffer == InvalidBuffer)
        {
            /* easy case */
            buffer = ReadBufferBI(relation, targetBlock, bistate);
            LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
        }
        else if (otherBlock == targetBlock)
        {
            /* also easy case */
            buffer = otherBuffer;
            LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
        }
        else if (otherBlock < targetBlock)
        {
            /* lock other buffer first */
            buffer = ReadBuffer(relation, targetBlock);
            LockBuffer(otherBuffer, BUFFER_LOCK_EXCLUSIVE);
            LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
        }
        else
        {
            /* lock target buffer first */
            buffer = ReadBuffer(relation, targetBlock);
            LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
            LockBuffer(otherBuffer, BUFFER_LOCK_EXCLUSIVE);
        }

        /*
         * Now we can check to see if there's enough free space here. If so,
         * we're done.
         */
        page = BufferGetPage(buffer);
        pageFreeSpace = PageGetHeapFreeSpace(page);
        if (len + saveFreeSpace <= pageFreeSpace)
        {
            /* use this page as future insert target, too */
            RelationSetTargetBlock(relation, targetBlock);
            return buffer;
        }

        /*
         * Not enough space, so we must give up our page locks and pin (if
         * any) and prepare to look elsewhere.  We don't care which order we
         * unlock the two buffers in, so this can be slightly simpler than the
         * code above.
         */
        LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
        if (otherBuffer == InvalidBuffer)
            ReleaseBuffer(buffer);
        else if (otherBlock != targetBlock)
        {
            LockBuffer(otherBuffer, BUFFER_LOCK_UNLOCK);
            ReleaseBuffer(buffer);
        }

        /* Without FSM, always fall out of the loop and extend */
        if (!use_fsm)
            break;

        /*
         * Update FSM as to condition of this page, and ask for another page
         * to try.
         */
        targetBlock = RecordAndGetPageWithFreeSpace(relation,
                                                    targetBlock,
                                                    pageFreeSpace,
                                                    len + saveFreeSpace);
    }

    /*
     * Have to extend the relation.
     *
     * We have to use a lock to ensure no one else is extending the rel at the
     * same time, else we will both try to initialize the same new page.  We
     * can skip locking for new or temp relations, however, since no one else
     * could be accessing them.
     */
    needLock = !RELATION_IS_LOCAL(relation);

    if (needLock)
        LockRelationForExtension(relation, ExclusiveLock);

    /*
     * XXX This does an lseek - rather expensive - but at the moment it is the
     * only way to accurately determine how many blocks are in a relation.  Is
     * it worth keeping an accurate file length in shared memory someplace,
     * rather than relying on the kernel to do it for us?
     */
    buffer = ReadBufferBI(relation, P_NEW, bistate);

    /*
     * We can be certain that locking the otherBuffer first is OK, since it
     * must have a lower page number.
     */
    if (otherBuffer != InvalidBuffer)
        LockBuffer(otherBuffer, BUFFER_LOCK_EXCLUSIVE);

    /*
     * Now acquire lock on the new page.
     */
    LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);

    /*
     * Release the file-extension lock; it's now OK for someone else to extend
     * the relation some more.  Note that we cannot release this lock before
     * we have buffer lock on the new page, or we risk a race condition
     * against vacuumlazy.c --- see comments therein.
     */
    if (needLock)
        UnlockRelationForExtension(relation, ExclusiveLock);

    /*
     * We need to initialize the empty new page.  Double-check that it really
     * is empty (this should never happen, but if it does we don't want to
     * risk wiping out valid data).
     */
    page = BufferGetPage(buffer);

    if (!PageIsNew(page))
        elog(ERROR, "page %u of relation \"%s\" should be empty but is not",
             BufferGetBlockNumber(buffer),
             RelationGetRelationName(relation));

    PageInit(page, BufferGetPageSize(buffer), 0);

    if (len > PageGetHeapFreeSpace(page))
    {
        /* We should not get here given the test at the top */
        elog(PANIC, "tuple is too big: size %lu", (unsigned long) len);
    }

    /*
     * Remember the new page as our target for future insertions.
     *
     * XXX should we enter the new page into the free space map immediately,
     * or just keep it for this backend's exclusive use in the short run
     * (until VACUUM sees it)?  Seems to depend on whether you expect the
     * current backend to make more insertions or not, which is probably a
     * good bet most of the time.  So for now, don't add it to FSM yet.
     */
    RelationSetTargetBlock(relation, BufferGetBlockNumber(buffer));

    return buffer;
}
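
/*
 * A minimal sketch of the otherBuffer path described in the header comment
 * above, modeled on heap_update: the caller keeps a pin on the page holding
 * the old tuple (having released its lock on it) and lets this routine lock
 * both pages in increasing block-number order.  example_update_placement and
 * its variable names are hypothetical; the WAL logging and ctid bookkeeping
 * a real update performs are omitted.
 */
#ifdef HIO_USAGE_SKETCH
static void
example_update_placement(Relation relation, Buffer oldbuf, HeapTuple newtup)
{
    /*
     * On return, both oldbuf and newbuf are exclusive-locked, even in the
     * unlikely case that they turn out to be the same page.
     */
    Buffer      newbuf = RelationGetBufferForTuple(relation, newtup->t_len,
                                                   oldbuf, 0, NULL);

    /* ... place newtup on newbuf, update the old tuple's ctid, etc. ... */
    (void) newbuf;
}
#endif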