// SPDX-License-Identifier: GPL-2.0
/*
 * NTP state machine interfaces and logic.
 *
 * This code was mainly moved from kernel/timer.c and kernel/time.c.
 * Please see those files for relevant copyright info and historical
 * changelogs.
 */
#include <linux/capability.h>
#include <linux/clocksource.h>
#include <linux/workqueue.h>
#include <linux/hrtimer.h>
#include <linux/jiffies.h>
#include <linux/math64.h>
#include <linux/timex.h>
#include <linux/time.h>
#include <linux/module.h>
#include <linux/rtc.h>
#include <linux/math64.h>

#include "ntp_internal.h"
#include "timekeeping_internal.h"
/*
 * NTP timekeeping variables:
 *
 * Note: All of the NTP state is protected by the timekeeping locks.
 */

/* USER_HZ period (usecs): */
unsigned long tick_usec = USER_TICK_USEC;

/* SHIFTED_HZ period (nsecs): */
unsigned long tick_nsec;

static u64 tick_length;
static u64 tick_length_base;

#define SECS_PER_DAY		86400
#define MAX_TICKADJ		500LL		/* usecs */
#define MAX_TICKADJ_SCALED \
	(((MAX_TICKADJ * NSEC_PER_USEC) << NTP_SCALE_SHIFT) / NTP_INTERVAL_FREQ)
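/*
 * Example: adjtime() corrections are applied at most MAX_TICKADJ (500)
 * usecs per second; MAX_TICKADJ_SCALED is that same budget expressed in
 * NTP_SCALE_SHIFT fixed-point nanoseconds per accumulation interval, so
 * adding it to tick_length spreads the 500 usecs evenly over the
 * NTP_INTERVAL_FREQ intervals of the following second.
 */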
/*
 * phase-lock loop variables
 */

/*
 * clock synchronization status
 *
 * (TIME_ERROR prevents overwriting the CMOS clock)
 */
static int time_state = TIME_OK;

/* clock status bits: */
static int time_status = STA_UNSYNC;

/* time adjustment (nsecs): */
static s64 time_offset;

/* pll time constant: */
static long time_constant = 2;

/* maximum error (usecs): */
static long time_maxerror = NTP_PHASE_LIMIT;

/* estimated error (usecs): */
static long time_esterror = NTP_PHASE_LIMIT;

/* frequency offset (scaled nsecs/secs): */
static s64 time_freq;

/* time at last adjustment (secs): */
static time64_t time_reftime;

static long time_adjust;

/* constant (boot-param configurable) NTP tick adjustment (upscaled) */
static s64 ntp_tick_adj;

/* second value of the next pending leapsecond, or TIME64_MAX if no leap */
static time64_t ntp_next_leap_sec = TIME64_MAX;
#ifdef CONFIG_NTP_PPS

/*
 * The following variables are used when a pulse-per-second (PPS) signal
 * is available. They establish the engineering parameters of the clock
 * discipline loop when controlled by the PPS signal.
 */
#define PPS_VALID	10	/* PPS signal watchdog max (s) */
#define PPS_POPCORN	4	/* popcorn spike threshold (shift) */
#define PPS_INTMIN	2	/* min freq interval (s) (shift) */
#define PPS_INTMAX	8	/* max freq interval (s) (shift) */
#define PPS_INTCOUNT	4	/* number of consecutive good intervals to
				   increase pps_shift or consecutive bad
				   intervals to decrease it */
#define PPS_MAXWANDER	100000	/* max PPS freq wander (ns/s) */
static int pps_valid;		/* signal watchdog counter */
static long pps_tf[3];		/* phase median filter */
static long pps_jitter;		/* current jitter (ns) */
static struct timespec64 pps_fbase; /* beginning of the last freq interval */
static int pps_shift;		/* current interval duration (s) (shift) */
static int pps_intcnt;		/* interval counter */
static s64 pps_freq;		/* frequency offset (scaled ns/s) */
static long pps_stabil;		/* current stability (scaled ns/s) */

/*
 * PPS signal quality monitors
 */
static long pps_calcnt;		/* calibration intervals */
static long pps_jitcnt;		/* jitter limit exceeded */
static long pps_stbcnt;		/* stability limit exceeded */
static long pps_errcnt;		/* calibration errors */
/* PPS kernel consumer compensates the whole phase error immediately.
 * Otherwise, reduce the offset by a fixed factor times the time constant.
 */
static inline s64 ntp_offset_chunk(s64 offset)
{
	if (time_status & STA_PPSTIME && time_status & STA_PPSSIGNAL)
		return offset;
	else
		return shift_right(offset, SHIFT_PLL + time_constant);
}
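/*
 * Example: without a PPS consumer the phase error decays exponentially;
 * with SHIFT_PLL == 2 and the default time_constant of 2, each call to
 * second_overflow() consumes roughly 1/16 of the remaining time_offset.
 */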
static inline void pps_reset_freq_interval(void)
{
	/* the PPS calibration interval may end
	   surprisingly early */
	pps_shift = PPS_INTMIN;
	pps_intcnt = 0;
}

/**
 * pps_clear - Clears the PPS state variables
 */
static inline void pps_clear(void)
{
	pps_reset_freq_interval();
	pps_tf[0] = 0;
	pps_tf[1] = 0;
	pps_tf[2] = 0;
	pps_fbase.tv_sec = pps_fbase.tv_nsec = 0;
	pps_freq = 0;
}
/* Decrease pps_valid to indicate that another second has passed since
 * the last PPS signal. When it reaches 0, indicate that PPS signal is
 * missing.
 */
static inline void pps_dec_valid(void)
{
	if (pps_valid > 0)
		pps_valid--;
	else {
		time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
				 STA_PPSWANDER | STA_PPSERROR);
		pps_clear();
	}
}

static inline void pps_set_freq(s64 freq)
{
	pps_freq = freq;
}
static inline int is_error_status(int status)
{
	return (status & (STA_UNSYNC|STA_CLOCKERR))
		/* PPS signal lost when either PPS time or
		 * PPS frequency synchronization requested
		 */
		|| ((status & (STA_PPSFREQ|STA_PPSTIME))
			&& !(status & STA_PPSSIGNAL))
		/* PPS jitter exceeded when
		 * PPS time synchronization requested */
		|| ((status & (STA_PPSTIME|STA_PPSJITTER))
			== (STA_PPSTIME|STA_PPSJITTER))
		/* PPS wander exceeded or calibration error when
		 * PPS frequency synchronization requested
		 */
		|| ((status & STA_PPSFREQ)
			&& (status & (STA_PPSWANDER|STA_PPSERROR)));
}
static inline void pps_fill_timex(struct timex *txc)
{
	txc->ppsfreq = shift_right((pps_freq >> PPM_SCALE_INV_SHIFT) *
				   PPM_SCALE_INV, NTP_SCALE_SHIFT);
	txc->jitter = pps_jitter;
	if (!(time_status & STA_NANO))
		txc->jitter /= NSEC_PER_USEC;
	txc->shift = pps_shift;
	txc->stabil = pps_stabil;
	txc->jitcnt = pps_jitcnt;
	txc->calcnt = pps_calcnt;
	txc->errcnt = pps_errcnt;
	txc->stbcnt = pps_stbcnt;
}
#else /* !CONFIG_NTP_PPS */

static inline s64 ntp_offset_chunk(s64 offset)
{
	return shift_right(offset, SHIFT_PLL + time_constant);
}

static inline void pps_reset_freq_interval(void) {}
static inline void pps_clear(void) {}
static inline void pps_dec_valid(void) {}
static inline void pps_set_freq(s64 freq) {}

static inline int is_error_status(int status)
{
	return status & (STA_UNSYNC|STA_CLOCKERR);
}
static inline void pps_fill_timex(struct timex *txc)
{
	/* PPS is not implemented, so these are zero */
	txc->ppsfreq = 0;
	txc->jitter = 0;
	txc->shift = 0;
	txc->stabil = 0;
	txc->jitcnt = 0;
	txc->calcnt = 0;
	txc->errcnt = 0;
	txc->stbcnt = 0;
}

#endif /* CONFIG_NTP_PPS */
/**
 * ntp_synced - Returns 1 if the NTP status is not UNSYNC
 */
static inline int ntp_synced(void)
{
	return !(time_status & STA_UNSYNC);
}
/*
 * Update (tick_length, tick_length_base, tick_nsec), based
 * on (tick_usec, ntp_tick_adj, time_freq):
 */
static void ntp_update_frequency(void)
{
	u64 second_length;
	u64 new_base;

	second_length = (u64)(tick_usec * NSEC_PER_USEC * USER_HZ)
						<< NTP_SCALE_SHIFT;

	second_length += ntp_tick_adj;
	second_length += time_freq;

	tick_nsec = div_u64(second_length, HZ) >> NTP_SCALE_SHIFT;
	new_base = div_u64(second_length, NTP_INTERVAL_FREQ);

	/*
	 * Don't wait for the next second_overflow, apply
	 * the change to the tick length immediately:
	 */
	tick_length += new_base - tick_length_base;
	tick_length_base = new_base;
}
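/*
 * Example: tick_usec * USER_HZ is nominally 1,000,000 usecs, so
 * second_length starts out as one second in NTP_SCALE_SHIFT fixed-point
 * nanoseconds. A time_freq of +50 ppm (scaled) then lengthens the second
 * by 50 us, and that surplus is divided evenly across the HZ ticks
 * (tick_nsec) and the NTP_INTERVAL_FREQ accumulation intervals
 * (tick_length_base).
 */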
static inline s64 ntp_update_offset_fll(s64 offset64, long secs)
{
	time_status &= ~STA_MODE;

	if (secs < MINSEC)
		return 0;

	if (!(time_status & STA_FLL) && (secs <= MAXSEC))
		return 0;

	time_status |= STA_MODE;

	return div64_long(offset64 << (NTP_SCALE_SHIFT - SHIFT_FLL), secs);
}
static void ntp_update_offset(long offset)
{
	s64 freq_adj;
	s64 offset64;
	long secs;

	if (!(time_status & STA_PLL))
		return;

	if (!(time_status & STA_NANO)) {
		/* Make sure the multiplication below won't overflow */
		offset = clamp(offset, -USEC_PER_SEC, USEC_PER_SEC);
		offset *= NSEC_PER_USEC;
	}

	/*
	 * Scale the phase adjustment and
	 * clamp to the operating range.
	 */
	offset = clamp(offset, -MAXPHASE, MAXPHASE);

	/*
	 * Select how the frequency is to be controlled
	 * and in which mode (PLL or FLL).
	 */
	secs = (long)(__ktime_get_real_seconds() - time_reftime);
	if (unlikely(time_status & STA_FREQHOLD))
		secs = 0;

	time_reftime = __ktime_get_real_seconds();

	offset64 = offset;
	freq_adj = ntp_update_offset_fll(offset64, secs);

	/*
	 * Clamp update interval to reduce PLL gain with low
	 * sampling rate (e.g. intermittent network connection)
	 * to avoid instability.
	 */
	if (unlikely(secs > 1 << (SHIFT_PLL + 1 + time_constant)))
		secs = 1 << (SHIFT_PLL + 1 + time_constant);

	freq_adj += (offset64 * secs) <<
			(NTP_SCALE_SHIFT - 2 * (SHIFT_PLL + 2 + time_constant));

	freq_adj = min(freq_adj + time_freq, MAXFREQ_SCALED);

	time_freq = max(freq_adj, -MAXFREQ_SCALED);

	time_offset = div_s64(offset64 << NTP_SCALE_SHIFT, NTP_INTERVAL_FREQ);
}
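/*
 * Example: with SHIFT_PLL == 2 and the default time_constant of 2, the
 * PLL term above is roughly offset64 * secs / 4096 in scaled ns/s, so a
 * persistent 1 ms phase error sampled every 16 s nudges time_freq by
 * about 4 us/s per update; the FLL path instead derives its correction
 * from offset64 / secs once the update interval exceeds MAXSEC.
 */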
/**
 * ntp_clear - Clears the NTP state variables
 */
void ntp_clear(void)
{
	time_adjust = 0;		/* stop active adjtime() */
	time_status |= STA_UNSYNC;
	time_maxerror = NTP_PHASE_LIMIT;
	time_esterror = NTP_PHASE_LIMIT;

	ntp_update_frequency();

	tick_length = tick_length_base;
	time_offset = 0;

	ntp_next_leap_sec = TIME64_MAX;
	/* Clear PPS state variables */
	pps_clear();
}

u64 ntp_tick_length(void)
{
	return tick_length;
}
/**
 * ntp_get_next_leap - Returns the next leapsecond in CLOCK_REALTIME ktime_t
 *
 * Provides the time of the next leapsecond against CLOCK_REALTIME in
 * a ktime_t format. Returns KTIME_MAX if no leapsecond is pending.
 */
ktime_t ntp_get_next_leap(void)
{
	if ((time_state == TIME_INS) && (time_status & STA_INS))
		return ktime_set(ntp_next_leap_sec, 0);

	return KTIME_MAX;
}
/*
 * This routine handles the overflow of the microsecond field.
 *
 * The tricky bits of code to handle the accurate clock support
 * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
 * They were originally developed for SUN and DEC kernels.
 * All the kudos should go to Dave for this stuff.
 *
 * Also handles leap second processing, and returns the leap offset.
 */
int second_overflow(time64_t secs)
{
	s64 delta;
	int leap = 0;
	s32 rem;

	/*
	 * Leap second processing. If in leap-insert state at the end of the
	 * day, the system clock is set back one second; if in leap-delete
	 * state, the system clock is set ahead one second.
	 */
	switch (time_state) {
	case TIME_OK:
		if (time_status & STA_INS) {
			time_state = TIME_INS;
			div_s64_rem(secs, SECS_PER_DAY, &rem);
			ntp_next_leap_sec = secs + SECS_PER_DAY - rem;
		} else if (time_status & STA_DEL) {
			time_state = TIME_DEL;
			div_s64_rem(secs + 1, SECS_PER_DAY, &rem);
			ntp_next_leap_sec = secs + SECS_PER_DAY - rem;
		}
		break;
	case TIME_INS:
		if (!(time_status & STA_INS)) {
			ntp_next_leap_sec = TIME64_MAX;
			time_state = TIME_OK;
		} else if (secs == ntp_next_leap_sec) {
			leap = -1;
			time_state = TIME_OOP;
			printk(KERN_NOTICE
				"Clock: inserting leap second 23:59:60 UTC\n");
		}
		break;
	case TIME_DEL:
		if (!(time_status & STA_DEL)) {
			ntp_next_leap_sec = TIME64_MAX;
			time_state = TIME_OK;
		} else if (secs == ntp_next_leap_sec) {
			leap = 1;
			ntp_next_leap_sec = TIME64_MAX;
			time_state = TIME_WAIT;
			printk(KERN_NOTICE
				"Clock: deleting leap second 23:59:59 UTC\n");
		}
		break;
	case TIME_OOP:
		ntp_next_leap_sec = TIME64_MAX;
		time_state = TIME_WAIT;
		break;
	case TIME_WAIT:
		if (!(time_status & (STA_INS | STA_DEL)))
			time_state = TIME_OK;
		break;
	}
	/* Bump the maxerror field */
	time_maxerror += MAXFREQ / NSEC_PER_USEC;
	if (time_maxerror > NTP_PHASE_LIMIT) {
		time_maxerror = NTP_PHASE_LIMIT;
		time_status |= STA_UNSYNC;
	}

	/* Compute the phase adjustment for the next second */
	tick_length = tick_length_base;

	delta = ntp_offset_chunk(time_offset);
	time_offset -= delta;
	tick_length += delta;

	/* Check PPS signal */
	pps_dec_valid();

	if (!time_adjust)
		goto out;

	if (time_adjust > MAX_TICKADJ) {
		time_adjust -= MAX_TICKADJ;
		tick_length += MAX_TICKADJ_SCALED;
		goto out;
	}

	if (time_adjust < -MAX_TICKADJ) {
		time_adjust += MAX_TICKADJ;
		tick_length -= MAX_TICKADJ_SCALED;
		goto out;
	}

	tick_length += (s64)(time_adjust * NSEC_PER_USEC / NTP_INTERVAL_FREQ)
						 << NTP_SCALE_SHIFT;
	time_adjust = 0;

out:
	return leap;
}
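/*
 * Example: adjtime(+3000 us) sets time_adjust to 3000; the code above then
 * slews 500 us (one MAX_TICKADJ chunk) per second, completing after six
 * seconds, at which point time_adjust reaches zero.
 */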
static void sync_hw_clock(struct work_struct *work);
static DECLARE_DELAYED_WORK(sync_work, sync_hw_clock);
static void sched_sync_hw_clock(struct timespec64 now,
				unsigned long target_nsec, bool fail)
{
	struct timespec64 next;

	ktime_get_real_ts64(&next);
	if (!fail)
		next.tv_sec = 659;
	else {
		/*
		 * Try again as soon as possible. Delaying long periods
		 * decreases the accuracy of the work queue timer. Due to this
		 * the algorithm is very likely to require a short-sleep retry
		 * after the above long sleep to synchronize ts_nsec.
		 */
		next.tv_sec = 0;
	}

	/* Compute the needed delay that will get to tv_nsec == target_nsec */
	next.tv_nsec = target_nsec - next.tv_nsec;
	if (next.tv_nsec <= 0)
		next.tv_nsec += NSEC_PER_SEC;
	if (next.tv_nsec >= NSEC_PER_SEC) {
		next.tv_sec++;
		next.tv_nsec -= NSEC_PER_SEC;
	}

	queue_delayed_work(system_power_efficient_wq, &sync_work,
			   timespec64_to_jiffies(&next));
}
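/*
 * Example: with target_nsec == NSEC_PER_SEC / 2 and a current tv_nsec of
 * 700 ms, the difference is -200 ms, so NSEC_PER_SEC is added and the
 * work fires 800 ms (plus the whole seconds chosen above) later, landing
 * on the next x.5 s boundary.
 */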
static void sync_rtc_clock(void)
{
	unsigned long target_nsec;
	struct timespec64 adjust, now;
	int rc;

	if (!IS_ENABLED(CONFIG_RTC_SYSTOHC))
		return;

	ktime_get_real_ts64(&now);

	adjust = now;
	if (persistent_clock_is_local)
		adjust.tv_sec -= (sys_tz.tz_minuteswest * 60);

	/*
	 * The current RTC in use will provide the target_nsec it wants to be
	 * called at, and does rtc_tv_nsec_ok internally.
	 */
	rc = rtc_set_ntp_time(adjust, &target_nsec);
	if (rc == -ENODEV)
		return;

	sched_sync_hw_clock(now, target_nsec, rc);
}
#ifdef CONFIG_GENERIC_CMOS_UPDATE
int __weak update_persistent_clock64(struct timespec64 now64)
{
	return -ENODEV;
}
#endif
static bool sync_cmos_clock(void)
{
	static bool no_cmos;
	struct timespec64 now;
	struct timespec64 adjust;
	int rc = -EPROTO;
	long target_nsec = NSEC_PER_SEC / 2;

	if (!IS_ENABLED(CONFIG_GENERIC_CMOS_UPDATE))
		return false;

	if (no_cmos)
		return false;

	/*
	 * Historically update_persistent_clock64() has followed x86
	 * semantics, which match the MC146818A/etc RTC. This RTC will store
	 * 'adjust' and then in .5s it will advance one second.
	 *
	 * Architectures are strongly encouraged to use rtclib and not
	 * implement this legacy API.
	 */
	ktime_get_real_ts64(&now);
	if (rtc_tv_nsec_ok(-1 * target_nsec, &adjust, &now)) {
		if (persistent_clock_is_local)
			adjust.tv_sec -= (sys_tz.tz_minuteswest * 60);
		rc = update_persistent_clock64(adjust);
		/*
		 * The machine does not support update_persistent_clock64 even
		 * though it defines CONFIG_GENERIC_CMOS_UPDATE.
		 */
		if (rc == -ENODEV) {
			no_cmos = true;
			return false;
		}
	}

	sched_sync_hw_clock(now, target_nsec, rc);
	return true;
}
/*
 * If we have an externally synchronized Linux clock, then update the RTC
 * accordingly every ~11 minutes. Generally RTCs can only store second
 * precision, but many RTCs will adjust the phase of their second tick to
 * match the moment of update. This infrastructure arranges to call the RTC
 * set routine at the correct moment to phase-synchronize the RTC second
 * tick with the kernel clock.
 */
static void sync_hw_clock(struct work_struct *work)
{
	if (!ntp_synced())
		return;

	if (sync_cmos_clock())
		return;

	sync_rtc_clock();
}
void ntp_notify_cmos_timer(void)
{
	if (!ntp_synced())
		return;

	if (IS_ENABLED(CONFIG_GENERIC_CMOS_UPDATE) ||
	    IS_ENABLED(CONFIG_RTC_SYSTOHC))
		queue_delayed_work(system_power_efficient_wq, &sync_work, 0);
}
/*
 * Propagate a new txc->status value into the NTP state:
 */
static inline void process_adj_status(const struct timex *txc)
{
	if ((time_status & STA_PLL) && !(txc->status & STA_PLL)) {
		time_state = TIME_OK;
		time_status = STA_UNSYNC;
		ntp_next_leap_sec = TIME64_MAX;
		/* restart PPS frequency calibration */
		pps_reset_freq_interval();
	}

	/*
	 * If we turn on PLL adjustments then reset the
	 * reference time to current time.
	 */
	if (!(time_status & STA_PLL) && (txc->status & STA_PLL))
		time_reftime = __ktime_get_real_seconds();

	/* only set allowed bits */
	time_status &= STA_RONLY;
	time_status |= txc->status & ~STA_RONLY;
}
static inline void process_adjtimex_modes(const struct timex *txc, s32 *time_tai)
{
	if (txc->modes & ADJ_STATUS)
		process_adj_status(txc);

	if (txc->modes & ADJ_NANO)
		time_status |= STA_NANO;

	if (txc->modes & ADJ_MICRO)
		time_status &= ~STA_NANO;

	if (txc->modes & ADJ_FREQUENCY) {
		time_freq = txc->freq * PPM_SCALE;
		time_freq = min(time_freq, MAXFREQ_SCALED);
		time_freq = max(time_freq, -MAXFREQ_SCALED);
		/* update pps_freq */
		pps_set_freq(time_freq);
	}

	if (txc->modes & ADJ_MAXERROR)
		time_maxerror = txc->maxerror;

	if (txc->modes & ADJ_ESTERROR)
		time_esterror = txc->esterror;

	if (txc->modes & ADJ_TIMECONST) {
		time_constant = txc->constant;
		if (!(time_status & STA_NANO))
			time_constant += 4;
		time_constant = min(time_constant, (long)MAXTC);
		time_constant = max(time_constant, 0l);
	}

	if (txc->modes & ADJ_TAI && txc->constant > 0)
		*time_tai = txc->constant;

	if (txc->modes & ADJ_OFFSET)
		ntp_update_offset(txc->offset);

	if (txc->modes & ADJ_TICK)
		tick_usec = txc->tick;

	if (txc->modes & (ADJ_TICK|ADJ_FREQUENCY|ADJ_OFFSET))
		ntp_update_frequency();
}
/*
 * adjtimex() mainly allows reading (and writing, if superuser) of
 * kernel time-keeping variables. Used by xntpd.
 */
int __do_adjtimex(struct timex *txc, const struct timespec64 *ts, s32 *time_tai)
{
	int result;

	if (txc->modes & ADJ_ADJTIME) {
		long save_adjust = time_adjust;

		if (!(txc->modes & ADJ_OFFSET_READONLY)) {
			/* adjtime() is independent from ntp_adjtime() */
			time_adjust = txc->offset;
			ntp_update_frequency();
		}
		txc->offset = save_adjust;
	} else {
		/* If there are input parameters, then process them: */
		if (txc->modes)
			process_adjtimex_modes(txc, time_tai);

		txc->offset = shift_right(time_offset * NTP_INTERVAL_FREQ,
					  NTP_SCALE_SHIFT);
		if (!(time_status & STA_NANO))
			txc->offset /= NSEC_PER_USEC;
	}
	result = time_state;	/* mostly `TIME_OK' */
	/* check for errors */
	if (is_error_status(time_status))
		result = TIME_ERROR;

	txc->freq = shift_right((time_freq >> PPM_SCALE_INV_SHIFT) *
				PPM_SCALE_INV, NTP_SCALE_SHIFT);
	txc->maxerror = time_maxerror;
	txc->esterror = time_esterror;
	txc->status = time_status;
	txc->constant = time_constant;
	txc->precision = 1;
	txc->tolerance = MAXFREQ_SCALED / PPM_SCALE;
	txc->tick = tick_usec;
	txc->tai = *time_tai;

	/* fill PPS status fields */
	pps_fill_timex(txc);

	txc->time.tv_sec = (time_t)ts->tv_sec;
	txc->time.tv_usec = ts->tv_nsec;
	if (!(time_status & STA_NANO))
		txc->time.tv_usec /= NSEC_PER_USEC;
	/* Handle leapsec adjustments */
	if (unlikely(ts->tv_sec >= ntp_next_leap_sec)) {
		if ((time_state == TIME_INS) && (time_status & STA_INS)) {
			result = TIME_OOP;
			txc->tai++;
			txc->time.tv_sec--;
		}
		if ((time_state == TIME_DEL) && (time_status & STA_DEL)) {
			result = TIME_WAIT;
			txc->tai--;
			txc->time.tv_sec++;
		}
		if ((time_state == TIME_OOP) &&
				(ts->tv_sec == ntp_next_leap_sec)) {
			result = TIME_WAIT;
		}
	}

	return result;
}
#ifdef CONFIG_NTP_PPS

/* actually struct pps_normtime is good old struct timespec, but it is
 * semantically different (and it is the reason why it was invented):
 * pps_normtime.nsec has a range of ( -NSEC_PER_SEC / 2, NSEC_PER_SEC / 2 ]
 * while timespec.tv_nsec has a range of [0, NSEC_PER_SEC) */
struct pps_normtime {
	s64	sec;	/* seconds */
	long	nsec;	/* nanoseconds */
};
/* normalize the timestamp so that nsec is in the
   ( -NSEC_PER_SEC / 2, NSEC_PER_SEC / 2 ] interval */
static inline struct pps_normtime pps_normalize_ts(struct timespec64 ts)
{
	struct pps_normtime norm = {
		.sec = ts.tv_sec,
		.nsec = ts.tv_nsec
	};

	if (norm.nsec > (NSEC_PER_SEC >> 1)) {
		norm.nsec -= NSEC_PER_SEC;
		norm.sec++;
	}

	return norm;
}
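/*
 * Example: a timespec64 of { 4, 750000000 } (4.75 s) normalizes to
 * { .sec = 5, .nsec = -250000000 }, keeping nsec within the
 * ( -NSEC_PER_SEC / 2, NSEC_PER_SEC / 2 ] interval.
 */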
/* get current phase correction and jitter */
static inline long pps_phase_filter_get(long *jitter)
{
	*jitter = pps_tf[0] - pps_tf[1];
	if (*jitter < 0)
		*jitter = -*jitter;

	/* TODO: test various filters */
	return pps_tf[0];
}
/* add the sample to the phase filter */
static inline void pps_phase_filter_add(long err)
{
	pps_tf[2] = pps_tf[1];
	pps_tf[1] = pps_tf[0];
	pps_tf[0] = err;
}
/* decrease frequency calibration interval length.
 * It is halved after four consecutive unstable intervals.
 */
static inline void pps_dec_freq_interval(void)
{
	if (--pps_intcnt <= -PPS_INTCOUNT) {
		pps_intcnt = -PPS_INTCOUNT;
		if (pps_shift > PPS_INTMIN) {
			pps_shift--;
			pps_intcnt = 0;
		}
	}
}
/* increase frequency calibration interval length.
 * It is doubled after four consecutive stable intervals.
 */
static inline void pps_inc_freq_interval(void)
{
	if (++pps_intcnt >= PPS_INTCOUNT) {
		pps_intcnt = PPS_INTCOUNT;
		if (pps_shift < PPS_INTMAX) {
			pps_shift++;
			pps_intcnt = 0;
		}
	}
}
/* update clock frequency based on MONOTONIC_RAW clock PPS signal
 * timestamps
 *
 * At the end of the calibration interval the difference between the
 * first and last MONOTONIC_RAW clock timestamps divided by the length
 * of the interval becomes the frequency update. If the interval was
 * too long, the data are discarded.
 * Returns the difference between old and new frequency values.
 */
static long hardpps_update_freq(struct pps_normtime freq_norm)
{
	long delta, delta_mod;
	s64 ftemp;

	/* check if the frequency interval was too long */
	if (freq_norm.sec > (2 << pps_shift)) {
		time_status |= STA_PPSERROR;
		pps_errcnt++;
		pps_dec_freq_interval();
		printk_deferred(KERN_ERR
			"hardpps: PPSERROR: interval too long - %lld s\n",
			freq_norm.sec);
		return 0;
	}

	/* here the raw frequency offset and wander (stability) are
	 * calculated. If the wander is less than the wander threshold
	 * the interval is increased; otherwise it is decreased.
	 */
	ftemp = div_s64(((s64)(-freq_norm.nsec)) << NTP_SCALE_SHIFT,
			freq_norm.sec);
	delta = shift_right(ftemp - pps_freq, NTP_SCALE_SHIFT);
	pps_freq = ftemp;
	if (delta > PPS_MAXWANDER || delta < -PPS_MAXWANDER) {
		printk_deferred(KERN_WARNING
				"hardpps: PPSWANDER: change=%ld\n", delta);
		time_status |= STA_PPSWANDER;
		pps_stbcnt++;
		pps_dec_freq_interval();
	} else {	/* good sample */
		pps_inc_freq_interval();
	}

	/* the stability metric is calculated as the average of recent
	 * frequency changes, but is used only for performance
	 * monitoring
	 */
	delta_mod = delta;
	if (delta_mod < 0)
		delta_mod = -delta_mod;
	pps_stabil += (div_s64(((s64)delta_mod) <<
			       (NTP_SCALE_SHIFT - SHIFT_USEC),
			       NSEC_PER_USEC) - pps_stabil) >> PPS_INTMIN;

	/* if enabled, the system clock frequency is updated */
	if ((time_status & STA_PPSFREQ) != 0 &&
	    (time_status & STA_FREQHOLD) == 0) {
		time_freq = pps_freq;
		ntp_update_frequency();
	}

	return delta;
}
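/*
 * Example: if the MONOTONIC_RAW interval measured 64.000000640 s against
 * 64 PPS pulses (pps_shift == 6), freq_norm is { 64, 640 } and ftemp is
 * -640 / 64 = -10 ns/s in scaled units, i.e. the oscillator is deemed
 * about 10 ppb fast and pps_freq is steered down accordingly.
 */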
/* correct REALTIME clock phase error against PPS signal */
static void hardpps_update_phase(long error)
{
	long correction = -error;
	long jitter;

	/* add the sample to the median filter */
	pps_phase_filter_add(correction);
	correction = pps_phase_filter_get(&jitter);

	/* Nominal jitter is due to PPS signal noise. If it exceeds the
	 * threshold, the sample is discarded; otherwise, if so enabled,
	 * the time offset is updated.
	 */
	if (jitter > (pps_jitter << PPS_POPCORN)) {
		printk_deferred(KERN_WARNING
				"hardpps: PPSJITTER: jitter=%ld, limit=%ld\n",
				jitter, (pps_jitter << PPS_POPCORN));
		time_status |= STA_PPSJITTER;
		pps_jitcnt++;
	} else if (time_status & STA_PPSTIME) {
		/* correct the time using the phase offset */
		time_offset = div_s64(((s64)correction) << NTP_SCALE_SHIFT,
				      NTP_INTERVAL_FREQ);
		/* cancel running adjtime() */
		time_adjust = 0;
	}
	/* update jitter */
	pps_jitter += (jitter - pps_jitter) >> PPS_INTMIN;
}
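/*
 * Example: with PPS_POPCORN == 4, a pulse whose filtered jitter exceeds
 * 16 times the running pps_jitter average is treated as a popcorn spike:
 * it sets STA_PPSJITTER and bumps pps_jitcnt, but does not steer
 * time_offset.
 */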
/*
 * __hardpps() - discipline CPU clock oscillator to external PPS signal
 *
 * This routine is called at each PPS signal arrival in order to
 * discipline the CPU clock oscillator to the PPS signal. It takes two
 * parameters: REALTIME and MONOTONIC_RAW clock timestamps. The former
 * is used to correct clock phase error and the latter is used to
 * correct the frequency.
 *
 * This code is based on David Mills's reference nanokernel
 * implementation. It was mostly rewritten but keeps the same idea.
 */
void __hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_ts)
{
	struct pps_normtime pts_norm, freq_norm;

	pts_norm = pps_normalize_ts(*phase_ts);

	/* clear the error bits, they will be set again if needed */
	time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);

	/* indicate signal presence */
	time_status |= STA_PPSSIGNAL;
	pps_valid = PPS_VALID;

	/* when called for the first time,
	 * just start the frequency interval */
	if (unlikely(pps_fbase.tv_sec == 0)) {
		pps_fbase = *raw_ts;
		return;
	}

	/* ok, now we have a base for frequency calculation */
	freq_norm = pps_normalize_ts(timespec64_sub(*raw_ts, pps_fbase));

	/* check that the signal is in the range
	 * [1s - MAXFREQ us, 1s + MAXFREQ us], otherwise reject it */
	if ((freq_norm.sec == 0) ||
			(freq_norm.nsec > MAXFREQ * freq_norm.sec) ||
			(freq_norm.nsec < -MAXFREQ * freq_norm.sec)) {
		time_status |= STA_PPSJITTER;
		/* restart the frequency calibration interval */
		pps_fbase = *raw_ts;
		printk_deferred(KERN_ERR "hardpps: PPSJITTER: bad pulse\n");
		return;
	}

	/* signal is ok */

	/* check if the current frequency interval is finished */
	if (freq_norm.sec >= (1 << pps_shift)) {
		pps_calcnt++;
		/* restart the frequency calibration interval */
		pps_fbase = *raw_ts;
		hardpps_update_freq(freq_norm);
	}

	hardpps_update_phase(pts_norm.nsec);
}

#endif	/* CONFIG_NTP_PPS */
static int __init ntp_tick_adj_setup(char *str)
{
	int rc = kstrtos64(str, 0, &ntp_tick_adj);

	if (rc)
		return rc;

	ntp_tick_adj <<= NTP_SCALE_SHIFT;

	return 1;
}

__setup("ntp_tick_adj=", ntp_tick_adj_setup);

void __init ntp_init(void)
{
	ntp_clear();
}