1 /*-------------------------------------------------------------------------
2  *
3  * bgwriter.c
4  *
5  * The background writer (bgwriter) is new as of Postgres 8.0.  It attempts
6  * to keep regular backends from having to write out dirty shared buffers
7  * (which they would only do when needing to free a shared buffer to read in
8  * another page).  In the best scenario all writes from shared buffers will
9  * be issued by the background writer process.  However, regular backends are
10  * still empowered to issue writes if the bgwriter fails to maintain enough
11  * clean shared buffers.
12  *
13  * The bgwriter is also charged with handling all checkpoints.  It will
14  * automatically dispatch a checkpoint after a certain amount of time has
15  * elapsed since the last one, and it can be signaled to perform requested
16  * checkpoints as well.  (The GUC parameter that mandates a checkpoint every
17  * so many WAL segments is implemented by having backends signal the bgwriter
18  * when they fill WAL segments; the bgwriter itself doesn't watch for the
19  * condition.)
20  *
21  * The bgwriter is started by the postmaster as soon as the startup subprocess
22  * finishes, or as soon as recovery begins if we are doing archive recovery.
23  * It remains alive until the postmaster commands it to terminate.
24  * Normal termination is by SIGUSR2, which instructs the bgwriter to execute
25  * a shutdown checkpoint and then exit(0).      (All backends must be stopped
26  * before SIGUSR2 is issued!)  Emergency termination is by SIGQUIT; like any
27  * backend, the bgwriter will simply abort and exit on SIGQUIT.
28  *
29  * If the bgwriter exits unexpectedly, the postmaster treats that the same
30  * as a backend crash: shared memory may be corrupted, so remaining backends
31  * should be killed by SIGQUIT and then a recovery cycle started.  (Even if
32  * shared memory isn't corrupted, we have lost information about which
33  * files need to be fsync'd for the next checkpoint, and so a system
34  * restart needs to be forced.)
35  *
36  *
37  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
38  *
39  *
40  * IDENTIFICATION
41  *        $PostgreSQL: pgsql/src/backend/postmaster/bgwriter.c,v 1.65 2010/01/02 16:57:50 momjian Exp $
42  *
43  *-------------------------------------------------------------------------
44  */
45 #include "postgres.h"
46
47 #include <signal.h>
48 #include <sys/time.h>
49 #include <time.h>
50 #include <unistd.h>
51
52 #include "access/xlog_internal.h"
53 #include "catalog/pg_control.h"
54 #include "libpq/pqsignal.h"
55 #include "miscadmin.h"
56 #include "pgstat.h"
57 #include "postmaster/bgwriter.h"
58 #include "storage/bufmgr.h"
59 #include "storage/fd.h"
60 #include "storage/ipc.h"
61 #include "storage/lwlock.h"
62 #include "storage/pmsignal.h"
63 #include "storage/shmem.h"
64 #include "storage/smgr.h"
65 #include "storage/spin.h"
66 #include "tcop/tcopprot.h"
67 #include "utils/guc.h"
68 #include "utils/memutils.h"
69 #include "utils/resowner.h"
70
71
72 /*----------
73  * Shared memory area for communication between bgwriter and backends
74  *
75  * The ckpt counters allow backends to watch for completion of a checkpoint
76  * request they send.  Here's how it works:
77  *      * At start of a checkpoint, bgwriter reads (and clears) the request flags
78  *        and increments ckpt_started, while holding ckpt_lck.
79  *      * On completion of a checkpoint, bgwriter sets ckpt_done to
80  *        equal ckpt_started.
81  *      * On failure of a checkpoint, bgwriter increments ckpt_failed
82  *        and sets ckpt_done to equal ckpt_started.
83  *
84  * The algorithm for backends is:
85  *      1. Record current values of ckpt_failed and ckpt_started, and
86  *         set request flags, while holding ckpt_lck.
87  *      2. Send signal to request checkpoint.
88  *      3. Sleep until ckpt_started changes.  Now you know a checkpoint has
89  *         begun since you started this algorithm (although *not* that it was
90  *         specifically initiated by your signal), and that it is using your flags.
91  *      4. Record new value of ckpt_started.
92  *      5. Sleep until ckpt_done >= saved value of ckpt_started.  (Use modulo
93  *         arithmetic here in case counters wrap around.)  Now you know a
94  *         checkpoint has started and completed, but not whether it was
95  *         successful.
96  *      6. If ckpt_failed is different from the originally saved value,
97  *         assume request failed; otherwise it was definitely successful.
98  *
99  * ckpt_flags holds the OR of the checkpoint request flags sent by all
100  * requesting backends since the last checkpoint start.  The flags are
101  * chosen so that OR'ing is the correct way to combine multiple requests.
102  *
103  * num_backend_writes is used to count the number of buffer writes performed
104  * by non-bgwriter processes.  This counter should be wide enough that it
105  * can't overflow during a single bgwriter cycle.
106  *
107  * The requests array holds fsync requests sent by backends and not yet
108  * absorbed by the bgwriter.
109  *
110  * Unlike the checkpoint fields, num_backend_writes and the requests
111  * fields are protected by BgWriterCommLock.
112  *----------
113  */
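/*
 * (For orientation: the backend side of this protocol is implemented in
 * RequestCheckpoint() below; the step-5 completion test reduces to the
 * modulo-safe comparison "new_done - new_started >= 0".)
 */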
114 typedef struct
115 {
116         RelFileNode rnode;
117         ForkNumber      forknum;
118         BlockNumber segno;                      /* see md.c for special values */
119         /* might add a real request-type field later; not needed yet */
120 } BgWriterRequest;
121
122 typedef struct
123 {
124         pid_t           bgwriter_pid;   /* PID of bgwriter (0 if not started) */
125
126         slock_t         ckpt_lck;               /* protects all the ckpt_* fields */
127
128         int                     ckpt_started;   /* advances when checkpoint starts */
129         int                     ckpt_done;              /* advances when checkpoint done */
130         int                     ckpt_failed;    /* advances when checkpoint fails */
131
132         int                     ckpt_flags;             /* checkpoint flags, as defined in xlog.h */
133
134         uint32          num_backend_writes;             /* counts non-bgwriter buffer writes */
135
136         int                     num_requests;   /* current # of requests */
137         int                     max_requests;   /* allocated array size */
138         BgWriterRequest requests[1];    /* VARIABLE LENGTH ARRAY */
139 } BgWriterShmemStruct;
140
141 static BgWriterShmemStruct *BgWriterShmem;
142
143 /* interval for calling AbsorbFsyncRequests in CheckpointWriteDelay */
144 #define WRITES_PER_ABSORB               1000
145
146 /*
147  * GUC parameters
148  */
149 int                     BgWriterDelay = 200;
150 int                     CheckPointTimeout = 300;
151 int                     CheckPointWarning = 30;
152 double          CheckPointCompletionTarget = 0.5;
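/*
 * (For reference: these are set from the configuration parameters
 * bgwriter_delay (milliseconds), checkpoint_timeout and checkpoint_warning
 * (both in seconds), and checkpoint_completion_target.)
 */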
153
154 /*
155  * Flags set by interrupt handlers for later service in the main loop.
156  */
157 static volatile sig_atomic_t got_SIGHUP = false;
158 static volatile sig_atomic_t checkpoint_requested = false;
159 static volatile sig_atomic_t shutdown_requested = false;
160
161 /*
162  * Private state
163  */
164 static bool am_bg_writer = false;
165
166 static bool ckpt_active = false;
167
168 /* these values are valid when ckpt_active is true: */
169 static pg_time_t ckpt_start_time;
170 static XLogRecPtr ckpt_start_recptr;
171 static double ckpt_cached_elapsed;
172
173 static pg_time_t last_checkpoint_time;
174 static pg_time_t last_xlog_switch_time;
175
176 /* Prototypes for private functions */
177
178 static void CheckArchiveTimeout(void);
179 static void BgWriterNap(void);
180 static bool IsCheckpointOnSchedule(double progress);
181 static bool ImmediateCheckpointRequested(void);
182
183 /* Signal handlers */
184
185 static void bg_quickdie(SIGNAL_ARGS);
186 static void BgSigHupHandler(SIGNAL_ARGS);
187 static void ReqCheckpointHandler(SIGNAL_ARGS);
188 static void ReqShutdownHandler(SIGNAL_ARGS);
189
190
191 /*
192  * Main entry point for bgwriter process
193  *
194  * This is invoked from BootstrapMain, which has already created the basic
195  * execution environment, but not enabled signals yet.
196  */
197 void
198 BackgroundWriterMain(void)
199 {
200         sigjmp_buf      local_sigjmp_buf;
201         MemoryContext bgwriter_context;
202
203         BgWriterShmem->bgwriter_pid = MyProcPid;
204         am_bg_writer = true;
205
206         /*
207          * If possible, make this process a group leader, so that the postmaster
208          * can signal any child processes too.  (bgwriter probably never has any
209          * child processes, but for consistency we make all postmaster child
210          * processes do this.)
211          */
212 #ifdef HAVE_SETSID
213         if (setsid() < 0)
214                 elog(FATAL, "setsid() failed: %m");
215 #endif
216
217         /*
218          * Properly accept or ignore signals the postmaster might send us
219          *
220          * Note: we deliberately ignore SIGTERM, because during a standard Unix
221          * system shutdown cycle, init will SIGTERM all processes at once.      We
222          * want to wait for the backends to exit, whereupon the postmaster will
223          * tell us it's okay to shut down (via SIGUSR2).
224          *
225          * SIGUSR1 is presently unused; keep it spare in case someday we want this
226          * process to participate in ProcSignal signalling.
227          */
228         pqsignal(SIGHUP, BgSigHupHandler);      /* set flag to read config file */
229         pqsignal(SIGINT, ReqCheckpointHandler);         /* request checkpoint */
230         pqsignal(SIGTERM, SIG_IGN); /* ignore SIGTERM */
231         pqsignal(SIGQUIT, bg_quickdie);         /* hard crash time */
232         pqsignal(SIGALRM, SIG_IGN);
233         pqsignal(SIGPIPE, SIG_IGN);
234         pqsignal(SIGUSR1, SIG_IGN); /* reserve for ProcSignal */
235         pqsignal(SIGUSR2, ReqShutdownHandler);          /* request shutdown */
236
237         /*
238          * Reset some signals that are accepted by postmaster but not here
239          */
240         pqsignal(SIGCHLD, SIG_DFL);
241         pqsignal(SIGTTIN, SIG_DFL);
242         pqsignal(SIGTTOU, SIG_DFL);
243         pqsignal(SIGCONT, SIG_DFL);
244         pqsignal(SIGWINCH, SIG_DFL);
245
246         /* We allow SIGQUIT (quickdie) at all times */
247         sigdelset(&BlockSig, SIGQUIT);
248
249         /*
250          * Initialize so that first time-driven event happens at the correct time.
251          */
252         last_checkpoint_time = last_xlog_switch_time = (pg_time_t) time(NULL);
253
254         /*
255          * Create a resource owner to keep track of our resources (currently only
256          * buffer pins).
257          */
258         CurrentResourceOwner = ResourceOwnerCreate(NULL, "Background Writer");
259
260         /*
261          * Create a memory context that we will do all our work in.  We do this so
262          * that we can reset the context during error recovery and thereby avoid
263          * possible memory leaks.  Formerly this code just ran in
264          * TopMemoryContext, but resetting that would be a really bad idea.
265          */
266         bgwriter_context = AllocSetContextCreate(TopMemoryContext,
267                                                                                          "Background Writer",
268                                                                                          ALLOCSET_DEFAULT_MINSIZE,
269                                                                                          ALLOCSET_DEFAULT_INITSIZE,
270                                                                                          ALLOCSET_DEFAULT_MAXSIZE);
271         MemoryContextSwitchTo(bgwriter_context);
272
273         /*
274          * If an exception is encountered, processing resumes here.
275          *
276          * See notes in postgres.c about the design of this coding.
277          */
278         if (sigsetjmp(local_sigjmp_buf, 1) != 0)
279         {
280                 /* Since not using PG_TRY, must reset error stack by hand */
281                 error_context_stack = NULL;
282
283                 /* Prevent interrupts while cleaning up */
284                 HOLD_INTERRUPTS();
285
286                 /* Report the error to the server log */
287                 EmitErrorReport();
288
289                 /*
290                  * These operations are really just a minimal subset of
291                  * AbortTransaction().  We don't have very many resources to worry
292                  * about in bgwriter, but we do have LWLocks, buffers, and temp files.
293                  */
294                 LWLockReleaseAll();
295                 AbortBufferIO();
296                 UnlockBuffers();
297                 /* buffer pins are released here: */
298                 ResourceOwnerRelease(CurrentResourceOwner,
299                                                          RESOURCE_RELEASE_BEFORE_LOCKS,
300                                                          false, true);
301                 /* we needn't bother with the other ResourceOwnerRelease phases */
302                 AtEOXact_Buffers(false);
303                 AtEOXact_Files();
304                 AtEOXact_HashTables(false);
305
306                 /* Warn any waiting backends that the checkpoint failed. */
307                 if (ckpt_active)
308                 {
309                         /* use volatile pointer to prevent code rearrangement */
310                         volatile BgWriterShmemStruct *bgs = BgWriterShmem;
311
312                         SpinLockAcquire(&bgs->ckpt_lck);
313                         bgs->ckpt_failed++;
314                         bgs->ckpt_done = bgs->ckpt_started;
315                         SpinLockRelease(&bgs->ckpt_lck);
316
317                         ckpt_active = false;
318                 }
319
320                 /*
321                  * Now return to normal top-level context and clear ErrorContext for
322                  * next time.
323                  */
324                 MemoryContextSwitchTo(bgwriter_context);
325                 FlushErrorState();
326
327                 /* Flush any leaked data in the top-level context */
328                 MemoryContextResetAndDeleteChildren(bgwriter_context);
329
330                 /* Now we can allow interrupts again */
331                 RESUME_INTERRUPTS();
332
333                 /*
334                  * Sleep at least 1 second after any error.  A write error is likely
335                  * to be repeated, and we don't want to be filling the error logs as
336                  * fast as we can.
337                  */
338                 pg_usleep(1000000L);
339
340                 /*
341                  * Close all open files after any error.  This is helpful on Windows,
342                  * where holding deleted files open causes various strange errors.
343                  * It's not clear we need it elsewhere, but shouldn't hurt.
344                  */
345                 smgrcloseall();
346         }
347
348         /* We can now handle ereport(ERROR) */
349         PG_exception_stack = &local_sigjmp_buf;
350
351         /*
352          * Unblock signals (they were blocked when the postmaster forked us)
353          */
354         PG_SETMASK(&UnBlockSig);
355
356         /*
357          * Loop forever
358          */
359         for (;;)
360         {
361                 bool            do_checkpoint = false;
362                 int                     flags = 0;
363                 pg_time_t       now;
364                 int                     elapsed_secs;
365
366                 /*
367                  * Emergency bailout if postmaster has died.  This is to avoid the
368                  * necessity for manual cleanup of all postmaster children.
369                  */
370                 if (!PostmasterIsAlive(true))
371                         exit(1);
372
373                 /*
374                  * Process any requests or signals received recently.
375                  */
376                 AbsorbFsyncRequests();
377
378                 if (got_SIGHUP)
379                 {
380                         got_SIGHUP = false;
381                         ProcessConfigFile(PGC_SIGHUP);
382                 }
383                 if (checkpoint_requested)
384                 {
385                         checkpoint_requested = false;
386                         do_checkpoint = true;
387                         BgWriterStats.m_requested_checkpoints++;
388                 }
389                 if (shutdown_requested)
390                 {
391                         /*
392                          * From here on, elog(ERROR) should end with exit(1), not send
393                          * control back to the sigsetjmp block above
394                          */
395                         ExitOnAnyError = true;
396                         /* Close down the database */
397                         ShutdownXLOG(0, 0);
398                         /* Normal exit from the bgwriter is here */
399                         proc_exit(0);           /* done */
400                 }
401
402                 /*
403                  * Force a checkpoint if too much time has elapsed since the last one.
404                  * Note that we count a timed checkpoint in stats only when this
405                  * occurs without an external request, but we set the CAUSE_TIME flag
406                  * bit even if there is also an external request.
407                  */
408                 now = (pg_time_t) time(NULL);
409                 elapsed_secs = now - last_checkpoint_time;
410                 if (elapsed_secs >= CheckPointTimeout)
411                 {
412                         if (!do_checkpoint)
413                                 BgWriterStats.m_timed_checkpoints++;
414                         do_checkpoint = true;
415                         flags |= CHECKPOINT_CAUSE_TIME;
416                 }
417
418                 /*
419                  * Do a checkpoint if requested, otherwise do one cycle of
420                  * dirty-buffer writing.
421                  */
422                 if (do_checkpoint)
423                 {
424                         bool            ckpt_performed = false;
425                         bool            do_restartpoint;
426
427                         /* use volatile pointer to prevent code rearrangement */
428                         volatile BgWriterShmemStruct *bgs = BgWriterShmem;
429
430                         /*
431                          * Check if we should perform a checkpoint or a restartpoint. As a
432                          * side-effect, RecoveryInProgress() initializes TimeLineID if
433                          * it's not set yet.
434                          */
435                         do_restartpoint = RecoveryInProgress();
436
437                         /*
438                          * Atomically fetch the request flags to figure out what kind of a
439                          * checkpoint we should perform, and increase the started-counter
440                          * to acknowledge that we've started a new checkpoint.
441                          */
442                         SpinLockAcquire(&bgs->ckpt_lck);
443                         flags |= bgs->ckpt_flags;
444                         bgs->ckpt_flags = 0;
445                         bgs->ckpt_started++;
446                         SpinLockRelease(&bgs->ckpt_lck);
447
448                         /*
449                          * The end-of-recovery checkpoint is a real checkpoint that's
450                          * performed while we're still in recovery.
451                          */
452                         if (flags & CHECKPOINT_END_OF_RECOVERY)
453                                 do_restartpoint = false;
454
455                         /*
456                          * We will warn if (a) too soon since last checkpoint (whatever
457                          * caused it) and (b) somebody set the CHECKPOINT_CAUSE_XLOG flag
458                          * since the last checkpoint start.  Note in particular that this
459                          * implementation will not generate warnings caused by
460                          * CheckPointTimeout < CheckPointWarning.
461                          */
462                         if (!do_restartpoint &&
463                                 (flags & CHECKPOINT_CAUSE_XLOG) &&
464                                 elapsed_secs < CheckPointWarning)
465                                 ereport(LOG,
466                                                 (errmsg_plural("checkpoints are occurring too frequently (%d second apart)",
467                                 "checkpoints are occurring too frequently (%d seconds apart)",
468                                                                            elapsed_secs,
469                                                                            elapsed_secs),
470                                                  errhint("Consider increasing the configuration parameter \"checkpoint_segments\".")));
471
472                         /*
473                          * Initialize bgwriter-private variables used during checkpoint.
474                          */
475                         ckpt_active = true;
476                         if (!do_restartpoint)
477                                 ckpt_start_recptr = GetInsertRecPtr();
478                         ckpt_start_time = now;
479                         ckpt_cached_elapsed = 0;
480
481                         /*
482                          * Do the checkpoint.
483                          */
484                         if (!do_restartpoint)
485                         {
486                                 CreateCheckPoint(flags);
487                                 ckpt_performed = true;
488                         }
489                         else
490                                 ckpt_performed = CreateRestartPoint(flags);
491
492                         /*
493                          * After any checkpoint, close all smgr files.  This is so we
494                          * won't hang onto smgr references to deleted files indefinitely.
495                          */
496                         smgrcloseall();
497
498                         /*
499                          * Indicate checkpoint completion to any waiting backends.
500                          */
501                         SpinLockAcquire(&bgs->ckpt_lck);
502                         bgs->ckpt_done = bgs->ckpt_started;
503                         SpinLockRelease(&bgs->ckpt_lck);
504
505                         if (ckpt_performed)
506                         {
507                                 /*
508                                  * Note we record the checkpoint start time not end time as
509                                  * last_checkpoint_time.  This is so that time-driven
510                                  * checkpoints happen at a predictable spacing.
511                                  */
512                                 last_checkpoint_time = now;
513                         }
514                         else
515                         {
516                                 /*
517                                  * We were not able to perform the restartpoint (checkpoints
518                                  * throw an ERROR in case of error).  Most likely because we
519                                  * have not received any new checkpoint WAL records since the
520                                  * last restartpoint. Try again in 15 s.
521                                  */
522                                 last_checkpoint_time = now - CheckPointTimeout + 15;
523                         }
524
525                         ckpt_active = false;
526                 }
527                 else
528                         BgBufferSync();
529
530                 /* Check for archive_timeout and switch xlog files if necessary. */
531                 CheckArchiveTimeout();
532
533                 /* Nap for the configured time. */
534                 BgWriterNap();
535         }
536 }
537
538 /*
539  * CheckArchiveTimeout -- check for archive_timeout and switch xlog files
540  *              if needed
541  */
542 static void
543 CheckArchiveTimeout(void)
544 {
545         pg_time_t       now;
546         pg_time_t       last_time;
547
548         if (XLogArchiveTimeout <= 0 || RecoveryInProgress())
549                 return;
550
551         now = (pg_time_t) time(NULL);
552
553         /* First we do a quick check using possibly-stale local state. */
554         if ((int) (now - last_xlog_switch_time) < XLogArchiveTimeout)
555                 return;
556
557         /*
558          * Update local state ... note that last_xlog_switch_time is the last time
559          * a switch was performed *or requested*.
560          */
561         last_time = GetLastSegSwitchTime();
562
563         last_xlog_switch_time = Max(last_xlog_switch_time, last_time);
564
565         /* Now we can do the real check */
566         if ((int) (now - last_xlog_switch_time) >= XLogArchiveTimeout)
567         {
568                 XLogRecPtr      switchpoint;
569
570                 /* OK, it's time to switch */
571                 switchpoint = RequestXLogSwitch();
572
573                 /*
574                  * If the returned pointer points exactly to a segment boundary,
575                  * assume nothing happened.
576                  */
577                 if ((switchpoint.xrecoff % XLogSegSize) != 0)
578                         ereport(DEBUG1,
579                                 (errmsg("transaction log switch forced (archive_timeout=%d)",
580                                                 XLogArchiveTimeout)));
581
582                 /*
583                  * Update state in any case, so we don't retry constantly when the
584                  * system is idle.
585                  */
586                 last_xlog_switch_time = now;
587         }
588 }
589
590 /*
591  * BgWriterNap -- Nap for the configured time or until a signal is received.
592  */
593 static void
594 BgWriterNap(void)
595 {
596         long            udelay;
597
598         /*
599          * Send off activity statistics to the stats collector
600          */
601         pgstat_send_bgwriter();
602
603         /*
604          * Nap for the configured time, or sleep for 10 seconds if there is no
605          * bgwriter activity configured.
606          *
607          * On some platforms, signals won't interrupt the sleep.  To ensure we
608          * respond reasonably promptly when someone signals us, break down the
609          * sleep into 1-second increments, and check for interrupts after each
610          * nap.
611          *
612          * We absorb pending requests after each short sleep.
613          */
614         if (bgwriter_lru_maxpages > 0 || ckpt_active)
615                 udelay = BgWriterDelay * 1000L;
616         else if (XLogArchiveTimeout > 0)
617                 udelay = 1000000L;              /* One second */
618         else
619                 udelay = 10000000L;             /* Ten seconds */
620
621         while (udelay > 999999L)
622         {
623                 if (got_SIGHUP || shutdown_requested ||
624                 (ckpt_active ? ImmediateCheckpointRequested() : checkpoint_requested))
625                         break;
626                 pg_usleep(1000000L);
627                 AbsorbFsyncRequests();
628                 udelay -= 1000000L;
629         }
630
631         if (!(got_SIGHUP || shutdown_requested ||
632           (ckpt_active ? ImmediateCheckpointRequested() : checkpoint_requested)))
633                 pg_usleep(udelay);
634 }
635
636 /*
637  * Returns true if an immediate checkpoint request is pending.  (Note that
638  * this does not check the *current* checkpoint's IMMEDIATE flag, but whether
639  * there is one pending behind it.)
640  */
641 static bool
642 ImmediateCheckpointRequested(void)
643 {
644         if (checkpoint_requested)
645         {
646                 volatile BgWriterShmemStruct *bgs = BgWriterShmem;
647
648                 /*
649                  * We don't need to acquire the ckpt_lck in this case because we're
650                  * only looking at a single flag bit.
651                  */
652                 if (bgs->ckpt_flags & CHECKPOINT_IMMEDIATE)
653                         return true;
654         }
655         return false;
656 }
657
658 /*
659  * CheckpointWriteDelay -- yield control to bgwriter during a checkpoint
660  *
661  * This function is called after each page write performed by BufferSync().
662  * It is responsible for keeping the bgwriter's normal activities in
663  * progress during a long checkpoint, and for throttling BufferSync()'s
664  * write rate to hit checkpoint_completion_target.
665  *
666  * The checkpoint request flags should be passed in; currently the only one
667  * examined is CHECKPOINT_IMMEDIATE, which disables delays between writes.
668  *
669  * 'progress' is an estimate of how much of the work has been done, as a
670  * fraction between 0.0 meaning none, and 1.0 meaning all done.
671  */
672 void
673 CheckpointWriteDelay(int flags, double progress)
674 {
675         static int      absorb_counter = WRITES_PER_ABSORB;
676
677         /* Do nothing if checkpoint is being executed by non-bgwriter process */
678         if (!am_bg_writer)
679                 return;
680
681         /*
682          * Perform the usual bgwriter duties and take a nap, unless we're behind
683          * schedule, in which case we just try to catch up as quickly as possible.
684          */
685         if (!(flags & CHECKPOINT_IMMEDIATE) &&
686                 !shutdown_requested &&
687                 !ImmediateCheckpointRequested() &&
688                 IsCheckpointOnSchedule(progress))
689         {
690                 if (got_SIGHUP)
691                 {
692                         got_SIGHUP = false;
693                         ProcessConfigFile(PGC_SIGHUP);
694                 }
695
696                 AbsorbFsyncRequests();
697                 absorb_counter = WRITES_PER_ABSORB;
698
699                 BgBufferSync();
700                 CheckArchiveTimeout();
701                 BgWriterNap();
702         }
703         else if (--absorb_counter <= 0)
704         {
705                 /*
706                  * Absorb pending fsync requests after each WRITES_PER_ABSORB write
707                  * operations even when we don't sleep, to prevent overflow of the
708                  * fsync request queue.
709                  */
710                 AbsorbFsyncRequests();
711                 absorb_counter = WRITES_PER_ABSORB;
712         }
713 }
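/*
 * (Usage sketch, assuming the usual bufmgr.c caller: BufferSync() invokes
 * this roughly as
 *
 *		CheckpointWriteDelay(flags, (double) num_written / num_to_write);
 *
 * i.e. 'progress' is the fraction of the to-be-written buffers already done.)
 */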
714
715 /*
716  * IsCheckpointOnSchedule -- are we on schedule to finish this checkpoint
717  *               in time?
718  *
719  * Compares the current progress against the time/segments elapsed since last
720  * checkpoint, and returns true if the progress we've made this far is greater
721  * than the elapsed time/segments.
722  */
723 static bool
724 IsCheckpointOnSchedule(double progress)
725 {
726         XLogRecPtr      recptr;
727         struct timeval now;
728         double          elapsed_xlogs,
729                                 elapsed_time;
730
731         Assert(ckpt_active);
732
733         /* Scale progress according to checkpoint_completion_target. */
734         progress *= CheckPointCompletionTarget;
735
736         /*
737          * Check against the cached value first. Only do the more expensive
738          * calculations once we reach the target previously calculated. Since
739          * neither time nor the WAL insert pointer moves backwards, a freshly
740          * calculated value can only be greater than or equal to the cached value.
741          */
742         if (progress < ckpt_cached_elapsed)
743                 return false;
744
745         /*
746          * Check progress against WAL segments written and checkpoint_segments.
747          *
748          * We compare the current WAL insert location against the location
749          * computed before calling CreateCheckPoint. The code in XLogInsert that
750          * actually triggers a checkpoint when checkpoint_segments is exceeded
751          * compares against RedoRecptr, so this is not completely accurate.
752          * However, it's good enough for our purposes, we're only calculating an
753          * estimate anyway.
754          */
755         if (!RecoveryInProgress())
756         {
757                 recptr = GetInsertRecPtr();
758                 elapsed_xlogs =
759                         (((double) (int32) (recptr.xlogid - ckpt_start_recptr.xlogid)) * XLogSegsPerFile +
760                          ((double) recptr.xrecoff - (double) ckpt_start_recptr.xrecoff) / XLogSegSize) /
761                         CheckPointSegments;
762
763                 if (progress < elapsed_xlogs)
764                 {
765                         ckpt_cached_elapsed = elapsed_xlogs;
766                         return false;
767                 }
768         }
769
770         /*
771          * Check progress against time elapsed and checkpoint_timeout.
772          */
773         gettimeofday(&now, NULL);
774         elapsed_time = ((double) ((pg_time_t) now.tv_sec - ckpt_start_time) +
775                                         now.tv_usec / 1000000.0) / CheckPointTimeout;
776
777         if (progress < elapsed_time)
778         {
779                 ckpt_cached_elapsed = elapsed_time;
780                 return false;
781         }
782
783         /* It looks like we're on schedule. */
784         return true;
785 }
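/*
 * Worked example of the scheduling rule above: with
 * checkpoint_completion_target = 0.5 and checkpoint_timeout = 300s, a
 * checkpoint that is half done (progress = 0.5, scaled to 0.25) is still
 * considered on schedule only while less than 75 seconds have elapsed and
 * less than a quarter of checkpoint_segments worth of WAL has been written
 * since the checkpoint started.
 */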
786
787
788 /* --------------------------------
789  *              signal handler routines
790  * --------------------------------
791  */
792
793 /*
794  * bg_quickdie() occurs when signalled SIGQUIT by the postmaster.
795  *
796  * Some backend has bought the farm,
797  * so we need to stop what we're doing and exit.
798  */
799 static void
800 bg_quickdie(SIGNAL_ARGS)
801 {
802         PG_SETMASK(&BlockSig);
803
804         /*
805          * We DO NOT want to run proc_exit() callbacks -- we're here because
806          * shared memory may be corrupted, so we don't want to try to clean up our
807          * transaction.  Just nail the windows shut and get out of town.  Now that
808          * there's an atexit callback to prevent third-party code from breaking
809          * things by calling exit() directly, we have to reset the callbacks
810          * explicitly to make this work as intended.
811          */
812         on_exit_reset();
813
814         /*
815          * Note we do exit(2) not exit(0).      This is to force the postmaster into a
816          * system reset cycle if some idiot DBA sends a manual SIGQUIT to a random
817          * backend.  This is necessary precisely because we don't clean up our
818          * shared memory state.  (The "dead man switch" mechanism in pmsignal.c
819          * should ensure the postmaster sees this as a crash, too, but no harm in
820          * being doubly sure.)
821          */
822         exit(2);
823 }
824
825 /* SIGHUP: set flag to re-read config file at next convenient time */
826 static void
827 BgSigHupHandler(SIGNAL_ARGS)
828 {
829         got_SIGHUP = true;
830 }
831
832 /* SIGINT: set flag to run a normal checkpoint right away */
833 static void
834 ReqCheckpointHandler(SIGNAL_ARGS)
835 {
836         checkpoint_requested = true;
837 }
838
839 /* SIGUSR2: set flag to run a shutdown checkpoint and exit */
840 static void
841 ReqShutdownHandler(SIGNAL_ARGS)
842 {
843         shutdown_requested = true;
844 }
845
846
847 /* --------------------------------
848  *              communication with backends
849  * --------------------------------
850  */
851
852 /*
853  * BgWriterShmemSize
854  *              Compute space needed for bgwriter-related shared memory
855  */
856 Size
857 BgWriterShmemSize(void)
858 {
859         Size            size;
860
861         /*
862          * Currently, the size of the requests[] array is arbitrarily set equal to
863          * NBuffers.  This may prove too large or small ...
864          */
865         size = offsetof(BgWriterShmemStruct, requests);
866         size = add_size(size, mul_size(NBuffers, sizeof(BgWriterRequest)));
867
868         return size;
869 }
870
871 /*
872  * BgWriterShmemInit
873  *              Allocate and initialize bgwriter-related shared memory
874  */
875 void
876 BgWriterShmemInit(void)
877 {
878         bool            found;
879
880         BgWriterShmem = (BgWriterShmemStruct *)
881                 ShmemInitStruct("Background Writer Data",
882                                                 BgWriterShmemSize(),
883                                                 &found);
884         if (BgWriterShmem == NULL)
885                 ereport(FATAL,
886                                 (errcode(ERRCODE_OUT_OF_MEMORY),
887                                  errmsg("not enough shared memory for background writer")));
888         if (found)
889                 return;                                 /* already initialized */
890
891         MemSet(BgWriterShmem, 0, sizeof(BgWriterShmemStruct));
892         SpinLockInit(&BgWriterShmem->ckpt_lck);
893         BgWriterShmem->max_requests = NBuffers;
894 }
895
896 /*
897  * RequestCheckpoint
898  *              Called in backend processes to request a checkpoint
899  *
900  * flags is a bitwise OR of the following:
901  *      CHECKPOINT_IS_SHUTDOWN: checkpoint is for database shutdown.
902  *      CHECKPOINT_END_OF_RECOVERY: checkpoint is for end of WAL recovery.
903  *      CHECKPOINT_IMMEDIATE: finish the checkpoint ASAP,
904  *              ignoring checkpoint_completion_target parameter.
905  *      CHECKPOINT_FORCE: force a checkpoint even if no XLOG activity has occurred
906  *              since the last one (implied by CHECKPOINT_IS_SHUTDOWN or
907  *              CHECKPOINT_END_OF_RECOVERY).
908  *      CHECKPOINT_WAIT: wait for completion before returning (otherwise,
909  *              just signal bgwriter to do it, and return).
910  *      CHECKPOINT_CAUSE_XLOG: checkpoint is requested due to xlog filling.
911  *              (This affects logging, and in particular enables CheckPointWarning.)
912  */
913 void
914 RequestCheckpoint(int flags)
915 {
916         /* use volatile pointer to prevent code rearrangement */
917         volatile BgWriterShmemStruct *bgs = BgWriterShmem;
918         int                     ntries;
919         int                     old_failed,
920                                 old_started;
921
922         /*
923          * If in a standalone backend, just do it ourselves.
924          */
925         if (!IsPostmasterEnvironment)
926         {
927                 /*
928                  * There's no point in doing slow checkpoints in a standalone backend,
929                  * There are no other backends the checkpoint could disrupt,
930                  */
931                 CreateCheckPoint(flags | CHECKPOINT_IMMEDIATE);
932
933                 /*
934                  * After any checkpoint, close all smgr files.  This is so we won't
935                  * hang onto smgr references to deleted files indefinitely.
936                  */
937                 smgrcloseall();
938
939                 return;
940         }
941
942         /*
943          * Atomically set the request flags, and take a snapshot of the counters.
944          * When we see ckpt_started > old_started, we know the flags we set here
945          * have been seen by bgwriter.
946          *
947          * Note that we OR the flags with any existing flags, to avoid overriding
948          * a "stronger" request by another backend.  The flag senses must be
949          * chosen to make this work!
950          */
951         SpinLockAcquire(&bgs->ckpt_lck);
952
953         old_failed = bgs->ckpt_failed;
954         old_started = bgs->ckpt_started;
955         bgs->ckpt_flags |= flags;
956
957         SpinLockRelease(&bgs->ckpt_lck);
958
959         /*
960          * Send signal to request checkpoint.  It's possible that the bgwriter
961          * hasn't started yet, or is in process of restarting, so we will retry a
962          * few times if needed.  Also, if not told to wait for the checkpoint to
963          * occur, we consider failure to send the signal to be nonfatal and merely
964          * LOG it.
965          */
966         for (ntries = 0;; ntries++)
967         {
968                 if (BgWriterShmem->bgwriter_pid == 0)
969                 {
970                         if (ntries >= 20)       /* max wait 2.0 sec */
971                         {
972                                 elog((flags & CHECKPOINT_WAIT) ? ERROR : LOG,
973                                 "could not request checkpoint because bgwriter not running");
974                                 break;
975                         }
976                 }
977                 else if (kill(BgWriterShmem->bgwriter_pid, SIGINT) != 0)
978                 {
979                         if (ntries >= 20)       /* max wait 2.0 sec */
980                         {
981                                 elog((flags & CHECKPOINT_WAIT) ? ERROR : LOG,
982                                          "could not signal for checkpoint: %m");
983                                 break;
984                         }
985                 }
986                 else
987                         break;                          /* signal sent successfully */
988
989                 CHECK_FOR_INTERRUPTS();
990                 pg_usleep(100000L);             /* wait 0.1 sec, then retry */
991         }
992
993         /*
994          * If requested, wait for completion.  We detect completion according to
995          * the algorithm given above.
996          */
997         if (flags & CHECKPOINT_WAIT)
998         {
999                 int                     new_started,
1000                                         new_failed;
1001
1002                 /* Wait for a new checkpoint to start. */
1003                 for (;;)
1004                 {
1005                         SpinLockAcquire(&bgs->ckpt_lck);
1006                         new_started = bgs->ckpt_started;
1007                         SpinLockRelease(&bgs->ckpt_lck);
1008
1009                         if (new_started != old_started)
1010                                 break;
1011
1012                         CHECK_FOR_INTERRUPTS();
1013                         pg_usleep(100000L);
1014                 }
1015
1016                 /*
1017                  * We are waiting for ckpt_done >= new_started, in a modulo sense.
1018                  */
1019                 for (;;)
1020                 {
1021                         int                     new_done;
1022
1023                         SpinLockAcquire(&bgs->ckpt_lck);
1024                         new_done = bgs->ckpt_done;
1025                         new_failed = bgs->ckpt_failed;
1026                         SpinLockRelease(&bgs->ckpt_lck);
1027
1028                         if (new_done - new_started >= 0)
1029                                 break;
1030
1031                         CHECK_FOR_INTERRUPTS();
1032                         pg_usleep(100000L);
1033                 }
1034
1035                 if (new_failed != old_failed)
1036                         ereport(ERROR,
1037                                         (errmsg("checkpoint request failed"),
1038                                          errhint("Consult recent messages in the server log for details.")));
1039         }
1040 }
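/*
 * (Typical invocations, for illustration: an explicit CHECKPOINT command
 * amounts to roughly
 *
 *		RequestCheckpoint(CHECKPOINT_IMMEDIATE | CHECKPOINT_WAIT | CHECKPOINT_FORCE);
 *
 * while the xlog-filling path in xlog.c requests CHECKPOINT_CAUSE_XLOG
 * without waiting.)
 */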
1041
1042 /*
1043  * ForwardFsyncRequest
1044  *              Forward a file-fsync request from a backend to the bgwriter
1045  *
1046  * Whenever a backend is compelled to write directly to a relation
1047  * (which should be seldom, if the bgwriter is getting its job done),
1048  * the backend calls this routine to pass over knowledge that the relation
1049  * is dirty and must be fsync'd before next checkpoint.  We also use this
1050  * opportunity to count such writes for statistical purposes.
1051  *
1052  * segno specifies which segment (not block!) of the relation needs to be
1053  * fsync'd.  (Since the valid range is much less than BlockNumber, we can
1054  * use high values for special flags; that's all internal to md.c, which
1055  * see for details.)
1056  *
1057  * If we are unable to pass over the request (at present, this can happen
1058  * if the shared memory queue is full), we return false.  That forces
1059  * the backend to do its own fsync.  We hope that will be even more seldom.
1060  *
1061  * Note: we presently make no attempt to eliminate duplicate requests
1062  * in the requests[] queue.  The bgwriter will have to eliminate dups
1063  * internally anyway, so we may as well avoid holding the lock longer
1064  * than we have to here.
1065  */
1066 bool
1067 ForwardFsyncRequest(RelFileNode rnode, ForkNumber forknum, BlockNumber segno)
1068 {
1069         BgWriterRequest *request;
1070
1071         if (!IsUnderPostmaster)
1072                 return false;                   /* probably shouldn't even get here */
1073
1074         if (am_bg_writer)
1075                 elog(ERROR, "ForwardFsyncRequest must not be called in bgwriter");
1076
1077         LWLockAcquire(BgWriterCommLock, LW_EXCLUSIVE);
1078
1079         /* we count non-bgwriter writes even when the request queue overflows */
1080         BgWriterShmem->num_backend_writes++;
1081
1082         if (BgWriterShmem->bgwriter_pid == 0 ||
1083                 BgWriterShmem->num_requests >= BgWriterShmem->max_requests)
1084         {
1085                 LWLockRelease(BgWriterCommLock);
1086                 return false;
1087         }
1088         request = &BgWriterShmem->requests[BgWriterShmem->num_requests++];
1089         request->rnode = rnode;
1090         request->forknum = forknum;
1091         request->segno = segno;
1092         LWLockRelease(BgWriterCommLock);
1093         return true;
1094 }
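/*
 * (Caller-side sketch, assuming the usual md.c pattern: when this returns
 * false the backend falls back to syncing the segment itself, e.g.
 *
 *		if (!ForwardFsyncRequest(rnode, forknum, segno))
 *			FileSync(seg->mdfd_vfd);
 *
 * so an overflowing request queue costs extra fsyncs but never loses one.)
 */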
1095
1096 /*
1097  * AbsorbFsyncRequests
1098  *              Retrieve queued fsync requests and pass them to local smgr.
1099  *
1100  * This is exported because it must be called during CreateCheckPoint;
1101  * we have to be sure we have accepted all pending requests just before
1102  * we start fsync'ing.  Since CreateCheckPoint sometimes runs in
1103  * non-bgwriter processes, do nothing if not bgwriter.
1104  */
1105 void
1106 AbsorbFsyncRequests(void)
1107 {
1108         BgWriterRequest *requests = NULL;
1109         BgWriterRequest *request;
1110         int                     n;
1111
1112         if (!am_bg_writer)
1113                 return;
1114
1115         /*
1116          * We have to PANIC if we fail to absorb all the pending requests (eg,
1117          * because our hashtable runs out of memory).  This is because the system
1118          * cannot run safely if we are unable to fsync what we have been told to
1119          * fsync.  Fortunately, the hashtable is so small that the problem is
1120          * quite unlikely to arise in practice.
1121          */
1122         START_CRIT_SECTION();
1123
1124         /*
1125          * We try to avoid holding the lock for a long time by copying the request
1126          * array.
1127          */
1128         LWLockAcquire(BgWriterCommLock, LW_EXCLUSIVE);
1129
1130         /* Transfer write count into pending pgstats message */
1131         BgWriterStats.m_buf_written_backend += BgWriterShmem->num_backend_writes;
1132         BgWriterShmem->num_backend_writes = 0;
1133
1134         n = BgWriterShmem->num_requests;
1135         if (n > 0)
1136         {
1137                 requests = (BgWriterRequest *) palloc(n * sizeof(BgWriterRequest));
1138                 memcpy(requests, BgWriterShmem->requests, n * sizeof(BgWriterRequest));
1139         }
1140         BgWriterShmem->num_requests = 0;
1141
1142         LWLockRelease(BgWriterCommLock);
1143
1144         for (request = requests; n > 0; request++, n--)
1145                 RememberFsyncRequest(request->rnode, request->forknum, request->segno);
1146
1147         if (requests)
1148                 pfree(requests);
1149
1150         END_CRIT_SECTION();
1151 }