/*-------------------------------------------------------------------------
 *
 * slru.c
 *		Simple LRU buffering for transaction status logfiles
 *
 * We use a simple least-recently-used scheme to manage a pool of page
 * buffers.  Under ordinary circumstances we expect that write
 * traffic will occur mostly to the latest page (and to the just-prior
 * page, soon after a page transition).  Read traffic will probably touch
 * a larger span of pages, but in any case a fairly small number of page
 * buffers should be sufficient.  So, we just search the buffers using plain
 * linear search; there's no need for a hashtable or anything fancy.
 * The management algorithm is straight LRU except that we will never swap
 * out the latest page (since we know it's going to be hit again eventually).
 *
 * We use a control LWLock to protect the shared data structures, plus
 * per-buffer LWLocks that synchronize I/O for each buffer.  The control lock
 * must be held to examine or modify any shared state.  A process that is
 * reading in or writing out a page buffer does not hold the control lock,
 * only the per-buffer lock for the buffer it is working on.
 *
 * "Holding the control lock" means exclusive lock in all cases except for
 * SimpleLruReadPage_ReadOnly(); see comments for SlruRecentlyUsed() for
 * the implications of that.
 *
 * When initiating I/O on a buffer, we acquire the per-buffer lock exclusively
 * before releasing the control lock.  The per-buffer lock is released after
 * completing the I/O, re-acquiring the control lock, and updating the shared
 * state.  (Deadlock is not possible here, because we never try to initiate
 * I/O when someone else is already doing I/O on the same buffer.)
 * To wait for I/O to complete, release the control lock, acquire the
 * per-buffer lock in shared mode, immediately release the per-buffer lock,
 * reacquire the control lock, and then recheck state (since arbitrary things
 * could have happened while we didn't have the lock).
 *
 * As with the regular buffer manager, it is possible for another process
 * to re-dirty a page that is currently being written out.  This is handled
 * by re-setting the page's page_dirty flag.
 *
 *
 * Portions Copyright (c) 1996-2009, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * $PostgreSQL: pgsql/src/backend/access/transam/slru.c,v 1.45 2009/01/01 17:23:36 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include <errno.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>

#include "access/slru.h"
#include "access/transam.h"
#include "access/xlog.h"
#include "storage/fd.h"
#include "storage/shmem.h"
#include "miscadmin.h"
/*
 * Define segment size.  A page is the same BLCKSZ as is used everywhere
 * else in Postgres.  The segment size can be chosen somewhat arbitrarily;
 * we make it 32 pages by default, or 256Kb, i.e. 1M transactions for CLOG
 * or 64K transactions for SUBTRANS.
 *
 * Note: because TransactionIds are 32 bits and wrap around at 0xFFFFFFFF,
 * page numbering also wraps around at 0xFFFFFFFF/xxxx_XACTS_PER_PAGE (where
 * xxxx is CLOG or SUBTRANS, respectively), and segment numbering at
 * 0xFFFFFFFF/xxxx_XACTS_PER_PAGE/SLRU_PAGES_PER_SEGMENT.  We need
 * take no explicit notice of that fact in this module, except when comparing
 * segment and page numbers in SimpleLruTruncate (see PagePrecedes()).
 *
 * Note: this file currently assumes that segment file names will be four
 * hex digits.  This sets a lower bound on the segment size (64K transactions
 * for 32-bit TransactionIds).
 */
#define SLRU_PAGES_PER_SEGMENT	32

/* Build the pathname of the segment file for segment number "seg". */
#define SlruFileName(ctl, path, seg) \
	snprintf(path, MAXPGPATH, "%s/%04X", (ctl)->Dir, seg)
/*
 * During SimpleLruFlush(), we will usually not need to write/fsync more
 * than one or two physical files, but we may need to write several pages
 * per file.  We can consolidate the I/O requests by leaving files open
 * until control returns to SimpleLruFlush().  This data structure remembers
 * which files are open.
 */
#define MAX_FLUSH_BUFFERS	16

/* Remembers files held open across one SimpleLruFlush() cycle */
typedef struct SlruFlushData
{
	int			num_files;		/* # files actually open */
	int			fd[MAX_FLUSH_BUFFERS];	/* their FD's */
	int			segno[MAX_FLUSH_BUFFERS];	/* their log seg#s */
} SlruFlushData;
/*
 * Macro to mark a buffer slot "most recently used".  Note multiple evaluation
 * of arguments!
 *
 * The reason for the if-test is that there are often many consecutive
 * accesses to the same page (particularly the latest page).  By suppressing
 * useless increments of cur_lru_count, we reduce the probability that old
 * pages' counts will "wrap around" and make them appear recently used.
 *
 * We allow this code to be executed concurrently by multiple processes within
 * SimpleLruReadPage_ReadOnly().  As long as int reads and writes are atomic,
 * this should not cause any completely-bogus values to enter the computation.
 * However, it is possible for either cur_lru_count or individual
 * page_lru_count entries to be "reset" to lower values than they should have,
 * in case a process is delayed while it executes this macro.  With care in
 * SlruSelectLRUPage(), this does little harm, and in any case the absolute
 * worst possible consequence is a nonoptimal choice of page to evict.  The
 * gain from allowing concurrent reads of SLRU pages seems worth it.
 */
#define SlruRecentlyUsed(shared, slotno)	\
	do { \
		int		new_lru_count = (shared)->cur_lru_count; \
		if (new_lru_count != (shared)->page_lru_count[slotno]) { \
			(shared)->cur_lru_count = ++new_lru_count; \
			(shared)->page_lru_count[slotno] = new_lru_count; \
		} \
	} while (0)
/* Saved info for SlruReportIOError */
typedef enum
{
	SLRU_OPEN_FAILED,
	SLRU_SEEK_FAILED,
	SLRU_READ_FAILED,
	SLRU_WRITE_FAILED,
	SLRU_FSYNC_FAILED,
	SLRU_CLOSE_FAILED
} SlruErrorCause;

static SlruErrorCause slru_errcause;	/* which operation failed */
static int	slru_errno;					/* errno captured at failure time */
143 static void SimpleLruZeroLSNs(SlruCtl ctl, int slotno);
144 static void SimpleLruWaitIO(SlruCtl ctl, int slotno);
145 static bool SlruPhysicalReadPage(SlruCtl ctl, int pageno, int slotno);
146 static bool SlruPhysicalWritePage(SlruCtl ctl, int pageno, int slotno,
148 static void SlruReportIOError(SlruCtl ctl, int pageno, TransactionId xid);
149 static int SlruSelectLRUPage(SlruCtl ctl, int pageno);
153 * Initialization of shared memory
157 SimpleLruShmemSize(int nslots, int nlsns)
161 /* we assume nslots isn't so large as to risk overflow */
162 sz = MAXALIGN(sizeof(SlruSharedData));
163 sz += MAXALIGN(nslots * sizeof(char *)); /* page_buffer[] */
164 sz += MAXALIGN(nslots * sizeof(SlruPageStatus)); /* page_status[] */
165 sz += MAXALIGN(nslots * sizeof(bool)); /* page_dirty[] */
166 sz += MAXALIGN(nslots * sizeof(int)); /* page_number[] */
167 sz += MAXALIGN(nslots * sizeof(int)); /* page_lru_count[] */
168 sz += MAXALIGN(nslots * sizeof(LWLockId)); /* buffer_locks[] */
171 sz += MAXALIGN(nslots * nlsns * sizeof(XLogRecPtr)); /* group_lsn[] */
173 return BUFFERALIGN(sz) + BLCKSZ * nslots;
177 SimpleLruInit(SlruCtl ctl, const char *name, int nslots, int nlsns,
178 LWLockId ctllock, const char *subdir)
183 shared = (SlruShared) ShmemInitStruct(name,
184 SimpleLruShmemSize(nslots, nlsns),
187 if (!IsUnderPostmaster)
189 /* Initialize locks and shared memory area */
196 memset(shared, 0, sizeof(SlruSharedData));
198 shared->ControlLock = ctllock;
200 shared->num_slots = nslots;
201 shared->lsn_groups_per_page = nlsns;
203 shared->cur_lru_count = 0;
205 /* shared->latest_page_number will be set later */
207 ptr = (char *) shared;
208 offset = MAXALIGN(sizeof(SlruSharedData));
209 shared->page_buffer = (char **) (ptr + offset);
210 offset += MAXALIGN(nslots * sizeof(char *));
211 shared->page_status = (SlruPageStatus *) (ptr + offset);
212 offset += MAXALIGN(nslots * sizeof(SlruPageStatus));
213 shared->page_dirty = (bool *) (ptr + offset);
214 offset += MAXALIGN(nslots * sizeof(bool));
215 shared->page_number = (int *) (ptr + offset);
216 offset += MAXALIGN(nslots * sizeof(int));
217 shared->page_lru_count = (int *) (ptr + offset);
218 offset += MAXALIGN(nslots * sizeof(int));
219 shared->buffer_locks = (LWLockId *) (ptr + offset);
220 offset += MAXALIGN(nslots * sizeof(LWLockId));
224 shared->group_lsn = (XLogRecPtr *) (ptr + offset);
225 offset += MAXALIGN(nslots * nlsns * sizeof(XLogRecPtr));
228 ptr += BUFFERALIGN(offset);
229 for (slotno = 0; slotno < nslots; slotno++)
231 shared->page_buffer[slotno] = ptr;
232 shared->page_status[slotno] = SLRU_PAGE_EMPTY;
233 shared->page_dirty[slotno] = false;
234 shared->page_lru_count[slotno] = 0;
235 shared->buffer_locks[slotno] = LWLockAssign();
243 * Initialize the unshared control struct, including directory path. We
244 * assume caller set PagePrecedes.
246 ctl->shared = shared;
247 ctl->do_fsync = true; /* default behavior */
248 StrNCpy(ctl->Dir, subdir, sizeof(ctl->Dir));
252 * Initialize (or reinitialize) a page to zeroes.
254 * The page is not actually written, just set up in shared memory.
255 * The slot number of the new page is returned.
257 * Control lock must be held at entry, and will be held at exit.
260 SimpleLruZeroPage(SlruCtl ctl, int pageno)
262 SlruShared shared = ctl->shared;
265 /* Find a suitable buffer slot for the page */
266 slotno = SlruSelectLRUPage(ctl, pageno);
267 Assert(shared->page_status[slotno] == SLRU_PAGE_EMPTY ||
268 (shared->page_status[slotno] == SLRU_PAGE_VALID &&
269 !shared->page_dirty[slotno]) ||
270 shared->page_number[slotno] == pageno);
272 /* Mark the slot as containing this page */
273 shared->page_number[slotno] = pageno;
274 shared->page_status[slotno] = SLRU_PAGE_VALID;
275 shared->page_dirty[slotno] = true;
276 SlruRecentlyUsed(shared, slotno);
278 /* Set the buffer to zeroes */
279 MemSet(shared->page_buffer[slotno], 0, BLCKSZ);
281 /* Set the LSNs for this new page to zero */
282 SimpleLruZeroLSNs(ctl, slotno);
284 /* Assume this page is now the latest active page */
285 shared->latest_page_number = pageno;
291 * Zero all the LSNs we store for this slru page.
293 * This should be called each time we create a new page, and each time we read
294 * in a page from disk into an existing buffer. (Such an old page cannot
295 * have any interesting LSNs, since we'd have flushed them before writing
296 * the page in the first place.)
299 SimpleLruZeroLSNs(SlruCtl ctl, int slotno)
301 SlruShared shared = ctl->shared;
303 if (shared->lsn_groups_per_page > 0)
304 MemSet(&shared->group_lsn[slotno * shared->lsn_groups_per_page], 0,
305 shared->lsn_groups_per_page * sizeof(XLogRecPtr));
309 * Wait for any active I/O on a page slot to finish. (This does not
310 * guarantee that new I/O hasn't been started before we return, though.
311 * In fact the slot might not even contain the same page anymore.)
313 * Control lock must be held at entry, and will be held at exit.
316 SimpleLruWaitIO(SlruCtl ctl, int slotno)
318 SlruShared shared = ctl->shared;
320 /* See notes at top of file */
321 LWLockRelease(shared->ControlLock);
322 LWLockAcquire(shared->buffer_locks[slotno], LW_SHARED);
323 LWLockRelease(shared->buffer_locks[slotno]);
324 LWLockAcquire(shared->ControlLock, LW_EXCLUSIVE);
327 * If the slot is still in an io-in-progress state, then either someone
328 * already started a new I/O on the slot, or a previous I/O failed and
329 * neglected to reset the page state. That shouldn't happen, really, but
330 * it seems worth a few extra cycles to check and recover from it. We can
331 * cheaply test for failure by seeing if the buffer lock is still held (we
332 * assume that transaction abort would release the lock).
334 if (shared->page_status[slotno] == SLRU_PAGE_READ_IN_PROGRESS ||
335 shared->page_status[slotno] == SLRU_PAGE_WRITE_IN_PROGRESS)
337 if (LWLockConditionalAcquire(shared->buffer_locks[slotno], LW_SHARED))
339 /* indeed, the I/O must have failed */
340 if (shared->page_status[slotno] == SLRU_PAGE_READ_IN_PROGRESS)
341 shared->page_status[slotno] = SLRU_PAGE_EMPTY;
342 else /* write_in_progress */
344 shared->page_status[slotno] = SLRU_PAGE_VALID;
345 shared->page_dirty[slotno] = true;
347 LWLockRelease(shared->buffer_locks[slotno]);
353 * Find a page in a shared buffer, reading it in if necessary.
354 * The page number must correspond to an already-initialized page.
356 * If write_ok is true then it is OK to return a page that is in
357 * WRITE_IN_PROGRESS state; it is the caller's responsibility to be sure
358 * that modification of the page is safe. If write_ok is false then we
359 * will not return the page until it is not undergoing active I/O.
361 * The passed-in xid is used only for error reporting, and may be
362 * InvalidTransactionId if no specific xid is associated with the action.
364 * Return value is the shared-buffer slot number now holding the page.
365 * The buffer's LRU access info is updated.
367 * Control lock must be held at entry, and will be held at exit.
370 SimpleLruReadPage(SlruCtl ctl, int pageno, bool write_ok,
373 SlruShared shared = ctl->shared;
375 /* Outer loop handles restart if we must wait for someone else's I/O */
381 /* See if page already is in memory; if not, pick victim slot */
382 slotno = SlruSelectLRUPage(ctl, pageno);
384 /* Did we find the page in memory? */
385 if (shared->page_number[slotno] == pageno &&
386 shared->page_status[slotno] != SLRU_PAGE_EMPTY)
389 * If page is still being read in, we must wait for I/O. Likewise
390 * if the page is being written and the caller said that's not OK.
392 if (shared->page_status[slotno] == SLRU_PAGE_READ_IN_PROGRESS ||
393 (shared->page_status[slotno] == SLRU_PAGE_WRITE_IN_PROGRESS &&
396 SimpleLruWaitIO(ctl, slotno);
397 /* Now we must recheck state from the top */
400 /* Otherwise, it's ready to use */
401 SlruRecentlyUsed(shared, slotno);
405 /* We found no match; assert we selected a freeable slot */
406 Assert(shared->page_status[slotno] == SLRU_PAGE_EMPTY ||
407 (shared->page_status[slotno] == SLRU_PAGE_VALID &&
408 !shared->page_dirty[slotno]));
410 /* Mark the slot read-busy */
411 shared->page_number[slotno] = pageno;
412 shared->page_status[slotno] = SLRU_PAGE_READ_IN_PROGRESS;
413 shared->page_dirty[slotno] = false;
415 /* Acquire per-buffer lock (cannot deadlock, see notes at top) */
416 LWLockAcquire(shared->buffer_locks[slotno], LW_EXCLUSIVE);
419 * Temporarily mark page as recently-used to discourage
420 * SlruSelectLRUPage from selecting it again for someone else.
422 SlruRecentlyUsed(shared, slotno);
424 /* Release control lock while doing I/O */
425 LWLockRelease(shared->ControlLock);
428 ok = SlruPhysicalReadPage(ctl, pageno, slotno);
430 /* Set the LSNs for this newly read-in page to zero */
431 SimpleLruZeroLSNs(ctl, slotno);
433 /* Re-acquire control lock and update page state */
434 LWLockAcquire(shared->ControlLock, LW_EXCLUSIVE);
436 Assert(shared->page_number[slotno] == pageno &&
437 shared->page_status[slotno] == SLRU_PAGE_READ_IN_PROGRESS &&
438 !shared->page_dirty[slotno]);
440 shared->page_status[slotno] = ok ? SLRU_PAGE_VALID : SLRU_PAGE_EMPTY;
442 LWLockRelease(shared->buffer_locks[slotno]);
444 /* Now it's okay to ereport if we failed */
446 SlruReportIOError(ctl, pageno, xid);
448 SlruRecentlyUsed(shared, slotno);
454 * Find a page in a shared buffer, reading it in if necessary.
455 * The page number must correspond to an already-initialized page.
456 * The caller must intend only read-only access to the page.
458 * The passed-in xid is used only for error reporting, and may be
459 * InvalidTransactionId if no specific xid is associated with the action.
461 * Return value is the shared-buffer slot number now holding the page.
462 * The buffer's LRU access info is updated.
464 * Control lock must NOT be held at entry, but will be held at exit.
465 * It is unspecified whether the lock will be shared or exclusive.
468 SimpleLruReadPage_ReadOnly(SlruCtl ctl, int pageno, TransactionId xid)
470 SlruShared shared = ctl->shared;
473 /* Try to find the page while holding only shared lock */
474 LWLockAcquire(shared->ControlLock, LW_SHARED);
476 /* See if page is already in a buffer */
477 for (slotno = 0; slotno < shared->num_slots; slotno++)
479 if (shared->page_number[slotno] == pageno &&
480 shared->page_status[slotno] != SLRU_PAGE_EMPTY &&
481 shared->page_status[slotno] != SLRU_PAGE_READ_IN_PROGRESS)
483 /* See comments for SlruRecentlyUsed macro */
484 SlruRecentlyUsed(shared, slotno);
489 /* No luck, so switch to normal exclusive lock and do regular read */
490 LWLockRelease(shared->ControlLock);
491 LWLockAcquire(shared->ControlLock, LW_EXCLUSIVE);
493 return SimpleLruReadPage(ctl, pageno, true, xid);
497 * Write a page from a shared buffer, if necessary.
498 * Does nothing if the specified slot is not dirty.
500 * NOTE: only one write attempt is made here. Hence, it is possible that
501 * the page is still dirty at exit (if someone else re-dirtied it during
502 * the write). However, we *do* attempt a fresh write even if the page
503 * is already being written; this is for checkpoints.
505 * Control lock must be held at entry, and will be held at exit.
508 SimpleLruWritePage(SlruCtl ctl, int slotno, SlruFlush fdata)
510 SlruShared shared = ctl->shared;
511 int pageno = shared->page_number[slotno];
514 /* If a write is in progress, wait for it to finish */
515 while (shared->page_status[slotno] == SLRU_PAGE_WRITE_IN_PROGRESS &&
516 shared->page_number[slotno] == pageno)
518 SimpleLruWaitIO(ctl, slotno);
522 * Do nothing if page is not dirty, or if buffer no longer contains the
523 * same page we were called for.
525 if (!shared->page_dirty[slotno] ||
526 shared->page_status[slotno] != SLRU_PAGE_VALID ||
527 shared->page_number[slotno] != pageno)
531 * Mark the slot write-busy, and clear the dirtybit. After this point, a
532 * transaction status update on this page will mark it dirty again.
534 shared->page_status[slotno] = SLRU_PAGE_WRITE_IN_PROGRESS;
535 shared->page_dirty[slotno] = false;
537 /* Acquire per-buffer lock (cannot deadlock, see notes at top) */
538 LWLockAcquire(shared->buffer_locks[slotno], LW_EXCLUSIVE);
540 /* Release control lock while doing I/O */
541 LWLockRelease(shared->ControlLock);
544 ok = SlruPhysicalWritePage(ctl, pageno, slotno, fdata);
546 /* If we failed, and we're in a flush, better close the files */
551 for (i = 0; i < fdata->num_files; i++)
555 /* Re-acquire control lock and update page state */
556 LWLockAcquire(shared->ControlLock, LW_EXCLUSIVE);
558 Assert(shared->page_number[slotno] == pageno &&
559 shared->page_status[slotno] == SLRU_PAGE_WRITE_IN_PROGRESS);
561 /* If we failed to write, mark the page dirty again */
563 shared->page_dirty[slotno] = true;
565 shared->page_status[slotno] = SLRU_PAGE_VALID;
567 LWLockRelease(shared->buffer_locks[slotno]);
569 /* Now it's okay to ereport if we failed */
571 SlruReportIOError(ctl, pageno, InvalidTransactionId);
575 * Physical read of a (previously existing) page into a buffer slot
577 * On failure, we cannot just ereport(ERROR) since caller has put state in
578 * shared memory that must be undone. So, we return FALSE and save enough
579 * info in static variables to let SlruReportIOError make the report.
581 * For now, assume it's not worth keeping a file pointer open across
582 * read/write operations. We could cache one virtual file pointer ...
585 SlruPhysicalReadPage(SlruCtl ctl, int pageno, int slotno)
587 SlruShared shared = ctl->shared;
588 int segno = pageno / SLRU_PAGES_PER_SEGMENT;
589 int rpageno = pageno % SLRU_PAGES_PER_SEGMENT;
590 int offset = rpageno * BLCKSZ;
591 char path[MAXPGPATH];
594 SlruFileName(ctl, path, segno);
597 * In a crash-and-restart situation, it's possible for us to receive
598 * commands to set the commit status of transactions whose bits are in
599 * already-truncated segments of the commit log (see notes in
600 * SlruPhysicalWritePage). Hence, if we are InRecovery, allow the case
601 * where the file doesn't exist, and return zeroes instead.
603 fd = BasicOpenFile(path, O_RDWR | PG_BINARY, S_IRUSR | S_IWUSR);
606 if (errno != ENOENT || !InRecovery)
608 slru_errcause = SLRU_OPEN_FAILED;
614 (errmsg("file \"%s\" doesn't exist, reading as zeroes",
616 MemSet(shared->page_buffer[slotno], 0, BLCKSZ);
620 if (lseek(fd, (off_t) offset, SEEK_SET) < 0)
622 slru_errcause = SLRU_SEEK_FAILED;
629 if (read(fd, shared->page_buffer[slotno], BLCKSZ) != BLCKSZ)
631 slru_errcause = SLRU_READ_FAILED;
639 slru_errcause = SLRU_CLOSE_FAILED;
648 * Physical write of a page from a buffer slot
650 * On failure, we cannot just ereport(ERROR) since caller has put state in
651 * shared memory that must be undone. So, we return FALSE and save enough
652 * info in static variables to let SlruReportIOError make the report.
654 * For now, assume it's not worth keeping a file pointer open across
655 * independent read/write operations. We do batch operations during
656 * SimpleLruFlush, though.
658 * fdata is NULL for a standalone write, pointer to open-file info during
662 SlruPhysicalWritePage(SlruCtl ctl, int pageno, int slotno, SlruFlush fdata)
664 SlruShared shared = ctl->shared;
665 int segno = pageno / SLRU_PAGES_PER_SEGMENT;
666 int rpageno = pageno % SLRU_PAGES_PER_SEGMENT;
667 int offset = rpageno * BLCKSZ;
668 char path[MAXPGPATH];
672 * Honor the write-WAL-before-data rule, if appropriate, so that we do not
673 * write out data before associated WAL records. This is the same action
674 * performed during FlushBuffer() in the main buffer manager.
676 if (shared->group_lsn != NULL)
679 * We must determine the largest async-commit LSN for the page. This
680 * is a bit tedious, but since this entire function is a slow path
681 * anyway, it seems better to do this here than to maintain a per-page
682 * LSN variable (which'd need an extra comparison in the
683 * transaction-commit path).
689 lsnindex = slotno * shared->lsn_groups_per_page;
690 max_lsn = shared->group_lsn[lsnindex++];
691 for (lsnoff = 1; lsnoff < shared->lsn_groups_per_page; lsnoff++)
693 XLogRecPtr this_lsn = shared->group_lsn[lsnindex++];
695 if (XLByteLT(max_lsn, this_lsn))
699 if (!XLogRecPtrIsInvalid(max_lsn))
702 * As noted above, elog(ERROR) is not acceptable here, so if
703 * XLogFlush were to fail, we must PANIC. This isn't much of a
704 * restriction because XLogFlush is just about all critical
705 * section anyway, but let's make sure.
707 START_CRIT_SECTION();
714 * During a Flush, we may already have the desired file open.
720 for (i = 0; i < fdata->num_files; i++)
722 if (fdata->segno[i] == segno)
733 * If the file doesn't already exist, we should create it. It is
734 * possible for this to need to happen when writing a page that's not
735 * first in its segment; we assume the OS can cope with that. (Note:
736 * it might seem that it'd be okay to create files only when
737 * SimpleLruZeroPage is called for the first page of a segment.
738 * However, if after a crash and restart the REDO logic elects to
739 * replay the log from a checkpoint before the latest one, then it's
740 * possible that we will get commands to set transaction status of
741 * transactions that have already been truncated from the commit log.
742 * Easiest way to deal with that is to accept references to
743 * nonexistent files here and in SlruPhysicalReadPage.)
745 * Note: it is possible for more than one backend to be executing this
746 * code simultaneously for different pages of the same file. Hence,
747 * don't use O_EXCL or O_TRUNC or anything like that.
749 SlruFileName(ctl, path, segno);
750 fd = BasicOpenFile(path, O_RDWR | O_CREAT | PG_BINARY,
754 slru_errcause = SLRU_OPEN_FAILED;
761 if (fdata->num_files < MAX_FLUSH_BUFFERS)
763 fdata->fd[fdata->num_files] = fd;
764 fdata->segno[fdata->num_files] = segno;
770 * In the unlikely event that we exceed MAX_FLUSH_BUFFERS,
771 * fall back to treating it as a standalone write.
778 if (lseek(fd, (off_t) offset, SEEK_SET) < 0)
780 slru_errcause = SLRU_SEEK_FAILED;
788 if (write(fd, shared->page_buffer[slotno], BLCKSZ) != BLCKSZ)
790 /* if write didn't set errno, assume problem is no disk space */
793 slru_errcause = SLRU_WRITE_FAILED;
801 * If not part of Flush, need to fsync now. We assume this happens
802 * infrequently enough that it's not a performance issue.
806 if (ctl->do_fsync && pg_fsync(fd))
808 slru_errcause = SLRU_FSYNC_FAILED;
816 slru_errcause = SLRU_CLOSE_FAILED;
826 * Issue the error message after failure of SlruPhysicalReadPage or
827 * SlruPhysicalWritePage. Call this after cleaning up shared-memory state.
830 SlruReportIOError(SlruCtl ctl, int pageno, TransactionId xid)
832 int segno = pageno / SLRU_PAGES_PER_SEGMENT;
833 int rpageno = pageno % SLRU_PAGES_PER_SEGMENT;
834 int offset = rpageno * BLCKSZ;
835 char path[MAXPGPATH];
837 SlruFileName(ctl, path, segno);
839 switch (slru_errcause)
841 case SLRU_OPEN_FAILED:
843 (errcode_for_file_access(),
844 errmsg("could not access status of transaction %u", xid),
845 errdetail("Could not open file \"%s\": %m.", path)));
847 case SLRU_SEEK_FAILED:
849 (errcode_for_file_access(),
850 errmsg("could not access status of transaction %u", xid),
851 errdetail("Could not seek in file \"%s\" to offset %u: %m.",
854 case SLRU_READ_FAILED:
856 (errcode_for_file_access(),
857 errmsg("could not access status of transaction %u", xid),
858 errdetail("Could not read from file \"%s\" at offset %u: %m.",
861 case SLRU_WRITE_FAILED:
863 (errcode_for_file_access(),
864 errmsg("could not access status of transaction %u", xid),
865 errdetail("Could not write to file \"%s\" at offset %u: %m.",
868 case SLRU_FSYNC_FAILED:
870 (errcode_for_file_access(),
871 errmsg("could not access status of transaction %u", xid),
872 errdetail("Could not fsync file \"%s\": %m.",
875 case SLRU_CLOSE_FAILED:
877 (errcode_for_file_access(),
878 errmsg("could not access status of transaction %u", xid),
879 errdetail("Could not close file \"%s\": %m.",
883 /* can't get here, we trust */
884 elog(ERROR, "unrecognized SimpleLru error cause: %d",
885 (int) slru_errcause);
891 * Select the slot to re-use when we need a free slot.
893 * The target page number is passed because we need to consider the
894 * possibility that some other process reads in the target page while
895 * we are doing I/O to free a slot. Hence, check or recheck to see if
896 * any slot already holds the target page, and return that slot if so.
897 * Thus, the returned slot is *either* a slot already holding the pageno
898 * (could be any state except EMPTY), *or* a freeable slot (state EMPTY
901 * Control lock must be held at entry, and will be held at exit.
904 SlruSelectLRUPage(SlruCtl ctl, int pageno)
906 SlruShared shared = ctl->shared;
908 /* Outer loop handles restart after I/O */
915 int best_page_number;
917 /* See if page already has a buffer assigned */
918 for (slotno = 0; slotno < shared->num_slots; slotno++)
920 if (shared->page_number[slotno] == pageno &&
921 shared->page_status[slotno] != SLRU_PAGE_EMPTY)
926 * If we find any EMPTY slot, just select that one. Else locate the
927 * least-recently-used slot to replace.
929 * Normally the page_lru_count values will all be different and so
930 * there will be a well-defined LRU page. But since we allow
931 * concurrent execution of SlruRecentlyUsed() within
932 * SimpleLruReadPage_ReadOnly(), it is possible that multiple pages
933 * acquire the same lru_count values. In that case we break ties by
934 * choosing the furthest-back page.
936 * In no case will we select the slot containing latest_page_number
937 * for replacement, even if it appears least recently used.
939 * Notice that this next line forcibly advances cur_lru_count to a
940 * value that is certainly beyond any value that will be in the
941 * page_lru_count array after the loop finishes. This ensures that
942 * the next execution of SlruRecentlyUsed will mark the page newly
943 * used, even if it's for a page that has the current counter value.
944 * That gets us back on the path to having good data when there are
945 * multiple pages with the same lru_count.
947 cur_count = (shared->cur_lru_count)++;
949 bestslot = 0; /* no-op, just keeps compiler quiet */
950 best_page_number = 0; /* ditto */
951 for (slotno = 0; slotno < shared->num_slots; slotno++)
954 int this_page_number;
956 if (shared->page_status[slotno] == SLRU_PAGE_EMPTY)
958 this_delta = cur_count - shared->page_lru_count[slotno];
962 * Clean up in case shared updates have caused cur_count
963 * increments to get "lost". We back off the page counts,
964 * rather than trying to increase cur_count, to avoid any
965 * question of infinite loops or failure in the presence of
966 * wrapped-around counts.
968 shared->page_lru_count[slotno] = cur_count;
971 this_page_number = shared->page_number[slotno];
972 if ((this_delta > best_delta ||
973 (this_delta == best_delta &&
974 ctl->PagePrecedes(this_page_number, best_page_number))) &&
975 this_page_number != shared->latest_page_number)
978 best_delta = this_delta;
979 best_page_number = this_page_number;
984 * If the selected page is clean, we're set.
986 if (shared->page_status[bestslot] == SLRU_PAGE_VALID &&
987 !shared->page_dirty[bestslot])
991 * We need to wait for I/O. Normal case is that it's dirty and we
992 * must initiate a write, but it's possible that the page is already
993 * write-busy, or in the worst case still read-busy. In those cases
994 * we wait for the existing I/O to complete.
996 if (shared->page_status[bestslot] == SLRU_PAGE_VALID)
997 SimpleLruWritePage(ctl, bestslot, NULL);
999 SimpleLruWaitIO(ctl, bestslot);
1002 * Now loop back and try again. This is the easiest way of dealing
1003 * with corner cases such as the victim page being re-dirtied while we
1010 * Flush dirty pages to disk during checkpoint or database shutdown
1013 SimpleLruFlush(SlruCtl ctl, bool checkpoint)
1015 SlruShared shared = ctl->shared;
1016 SlruFlushData fdata;
1023 * Find and write dirty pages
1025 fdata.num_files = 0;
1027 LWLockAcquire(shared->ControlLock, LW_EXCLUSIVE);
1029 for (slotno = 0; slotno < shared->num_slots; slotno++)
1031 SimpleLruWritePage(ctl, slotno, &fdata);
1034 * When called during a checkpoint, we cannot assert that the slot is
1035 * clean now, since another process might have re-dirtied it already.
1038 Assert(checkpoint ||
1039 shared->page_status[slotno] == SLRU_PAGE_EMPTY ||
1040 (shared->page_status[slotno] == SLRU_PAGE_VALID &&
1041 !shared->page_dirty[slotno]));
1044 LWLockRelease(shared->ControlLock);
1047 * Now fsync and close any files that were open
1050 for (i = 0; i < fdata.num_files; i++)
1052 if (ctl->do_fsync && pg_fsync(fdata.fd[i]))
1054 slru_errcause = SLRU_FSYNC_FAILED;
1056 pageno = fdata.segno[i] * SLRU_PAGES_PER_SEGMENT;
1060 if (close(fdata.fd[i]))
1062 slru_errcause = SLRU_CLOSE_FAILED;
1064 pageno = fdata.segno[i] * SLRU_PAGES_PER_SEGMENT;
1069 SlruReportIOError(ctl, pageno, InvalidTransactionId);
1073 * Remove all segments before the one holding the passed page number
1076 SimpleLruTruncate(SlruCtl ctl, int cutoffPage)
1078 SlruShared shared = ctl->shared;
1082 * The cutoff point is the start of the segment containing cutoffPage.
1084 cutoffPage -= cutoffPage % SLRU_PAGES_PER_SEGMENT;
1087 * Scan shared memory and remove any pages preceding the cutoff page, to
1088 * ensure we won't rewrite them later. (Since this is normally called in
1089 * or just after a checkpoint, any dirty pages should have been flushed
1090 * already ... we're just being extra careful here.)
1092 LWLockAcquire(shared->ControlLock, LW_EXCLUSIVE);
1097 * While we are holding the lock, make an important safety check: the
1098 * planned cutoff point must be <= the current endpoint page. Otherwise we
1099 * have already wrapped around, and proceeding with the truncation would
1100 * risk removing the current segment.
1102 if (ctl->PagePrecedes(shared->latest_page_number, cutoffPage))
1104 LWLockRelease(shared->ControlLock);
1106 (errmsg("could not truncate directory \"%s\": apparent wraparound",
1111 for (slotno = 0; slotno < shared->num_slots; slotno++)
1113 if (shared->page_status[slotno] == SLRU_PAGE_EMPTY)
1115 if (!ctl->PagePrecedes(shared->page_number[slotno], cutoffPage))
1119 * If page is clean, just change state to EMPTY (expected case).
1121 if (shared->page_status[slotno] == SLRU_PAGE_VALID &&
1122 !shared->page_dirty[slotno])
1124 shared->page_status[slotno] = SLRU_PAGE_EMPTY;
1129 * Hmm, we have (or may have) I/O operations acting on the page, so
1130 * we've got to wait for them to finish and then start again. This is
1131 * the same logic as in SlruSelectLRUPage. (XXX if page is dirty,
1132 * wouldn't it be OK to just discard it without writing it? For now,
1133 * keep the logic the same as it was.)
1135 if (shared->page_status[slotno] == SLRU_PAGE_VALID)
1136 SimpleLruWritePage(ctl, slotno, NULL);
1138 SimpleLruWaitIO(ctl, slotno);
1142 LWLockRelease(shared->ControlLock);
1144 /* Now we can remove the old segment(s) */
1145 (void) SlruScanDirectory(ctl, cutoffPage, true);
1149 * SimpleLruTruncate subroutine: scan directory for removable segments.
1150 * Actually remove them iff doDeletions is true. Return TRUE iff any
1151 * removable segments were found. Note: no locking is needed.
1153 * This can be called directly from clog.c, for reasons explained there.
1156 SlruScanDirectory(SlruCtl ctl, int cutoffPage, bool doDeletions)
1160 struct dirent *clde;
1163 char path[MAXPGPATH];
1166 * The cutoff point is the start of the segment containing cutoffPage.
1167 * (This is redundant when called from SimpleLruTruncate, but not when
1168 * called directly from clog.c.)
1170 cutoffPage -= cutoffPage % SLRU_PAGES_PER_SEGMENT;
1172 cldir = AllocateDir(ctl->Dir);
1173 while ((clde = ReadDir(cldir, ctl->Dir)) != NULL)
1175 if (strlen(clde->d_name) == 4 &&
1176 strspn(clde->d_name, "0123456789ABCDEF") == 4)
1178 segno = (int) strtol(clde->d_name, NULL, 16);
1179 segpage = segno * SLRU_PAGES_PER_SEGMENT;
1180 if (ctl->PagePrecedes(segpage, cutoffPage))
1185 snprintf(path, MAXPGPATH, "%s/%s", ctl->Dir, clde->d_name);
1187 (errmsg("removing file \"%s\"", path)));