--- attic/digest-timing.c.orig	2018-06-11 21:36:09.000000000 -0700
+++ attic/digest-timing.c	2018-06-19 23:14:59.000000000 -0700
@@ -33,6 +33,8 @@
 #include <openssl/rand.h>
 #include <openssl/objects.h>
 
+#include "ntp_machine.h"  /* For clock_gettime fallback */
+
 #define UNUSED_ARG(arg) ((void)(arg))
 
 #ifndef EVP_MD_CTX_reset
@@ -43,6 +45,23 @@
 #define EVP_MD_CTX_reset(ctx) EVP_MD_CTX_init(ctx)
 #endif
 
+/*
+ * Pecking order for clock_gettime interval measurements
+ * CLOCK_MONOTONIC_RAW is free of NTP adjustments
+ * CLOCK_MONOTONIC is affected by NTP slewing but not step adjustments
+ * CLOCK_REALTIME is affected by all NTP adjustments, including stepping
+ *
+ * Assuming this test isn't run while stepping is a possibility,
+ * even CLOCK_REALTIME is acceptable.
+ */
+#if defined(CLOCK_MONOTONIC_RAW)
+#define CLOCK_INTERVAL CLOCK_MONOTONIC_RAW
+#elif defined(CLOCK_MONOTONIC)
+#define CLOCK_INTERVAL CLOCK_MONOTONIC
+#else
+#define CLOCK_INTERVAL CLOCK_REALTIME
+#endif
+
 /* Get timing for old slower way too. Pre Feb 2018 */
 #define DoSLOW 1
 
@@ -144,21 +163,21 @@
 
   if (NULL == digest) return;
 
-  clock_gettime(CLOCK_MONOTONIC, &start);
+  clock_gettime(CLOCK_INTERVAL, &start);
   for (i = 0; i < NUM; i++) {
     digestlength = SSL_Digest(digest, key, keylength, pkt, pktlength);
   }
-  clock_gettime(CLOCK_MONOTONIC, &stop);
+  clock_gettime(CLOCK_INTERVAL, &stop);
   fast = (stop.tv_sec-start.tv_sec)*1E9 + (stop.tv_nsec-start.tv_nsec);
   printf("%10s %2d %2d %2u %6.0f %6.3f",
     name, keylength, pktlength, digestlength, fast/NUM, fast/1E9);
 
 #ifdef DoSLOW
-  clock_gettime(CLOCK_MONOTONIC, &start);
+  clock_gettime(CLOCK_INTERVAL, &start);
   for (i = 0; i < NUM; i++) {
     digestlength = SSL_DigestSlow(type, key, keylength, pkt, pktlength);
   }
-  clock_gettime(CLOCK_MONOTONIC, &stop);
+  clock_gettime(CLOCK_INTERVAL, &stop);
   slow = (stop.tv_sec-start.tv_sec)*1E9 + (stop.tv_nsec-start.tv_nsec);
   printf(" %6.0f %2.0f %4.0f",
     slow/NUM, (slow-fast)*100.0/slow, (slow-fast)/NUM);
@@ -183,11 +202,11 @@
 
   if (NULL == cipher) return;
 
-  clock_gettime(CLOCK_MONOTONIC, &start);
+  clock_gettime(CLOCK_INTERVAL, &start);
   for (i = 0; i < NUM; i++) {
     digestlength = SSL_CMAC(cipher, key, keylength, pkt, pktlength);
   }
-  clock_gettime(CLOCK_MONOTONIC, &stop);
+  clock_gettime(CLOCK_INTERVAL, &stop);
   fast = (stop.tv_sec-start.tv_sec)*1E9 + (stop.tv_nsec-start.tv_nsec);
   printf("%10s %2d %2d %2lu %6.0f %6.3f",
     name, keylength, pktlength, digestlength, fast/NUM, fast/1E9);
--- include/ntp_machine.h.orig	2018-06-11 21:36:09.000000000 -0700
+++ include/ntp_machine.h	2018-06-19 23:14:59.000000000 -0700
@@ -13,14 +13,53 @@
 
 #ifndef CLOCK_REALTIME
 /*
- * Pacify platforms that don't have a real clock_gettime(2),
- * notably Mac OS X.
+ * Handle platforms that don't have a real clock_gettime(2),
+ * notably some versions of Mac OS X.
  */
+
+#include <errno.h>
+
 #define CLOCK_REALTIME 0
-#define CLOCK_MONOTONIC 1
 typedef int clockid_t;
-int clock_gettime(clockid_t clock_id, struct timespec *tp);
-#endif
+
+static inline int clock_gettime(clockid_t clk_id, struct timespec *tp)
+{
+    struct timeval tv;
+
+    switch (clk_id) {
+    case CLOCK_REALTIME:
+        /*
+         * On OSX, it's tempting to use clock_get_time() for its apparent
+         * nanosecond resolution, but it really only has microsecond
+         * resolution, and is substantially slower than gettimeofday().
+         */
+        if (gettimeofday(&tv, NULL))
+            return -1;
+        tp->tv_sec = tv.tv_sec;
+        tp->tv_nsec = tv.tv_usec * 1000;
+        return 0;
+    default:
+        errno = EINVAL;
+        return -1;
+    }
+}
+
+static inline int clock_settime(clockid_t clk_id, const struct timespec *tp)
+{
+    struct timeval tv;
+
+    switch (clk_id) {
+    case CLOCK_REALTIME:
+        tv.tv_sec = tp->tv_sec;
+        tv.tv_usec = (tp->tv_nsec + 500) / 1000;
+        return settimeofday(&tv, NULL);
+    default:
+        errno = EINVAL;
+        return -1;
+    }
+}
+
+#endif /* !CLOCK_REALTIME */
 
 int ntp_set_tod (struct timespec *tvs);
--- include/ntp_stdlib.h.orig	2018-06-11 21:36:09.000000000 -0700
+++ include/ntp_stdlib.h	2018-06-19 23:14:59.000000000 -0700
@@ -113,7 +113,9 @@
 extern const char * ceventstr (int);
 extern const char * res_match_flags(unsigned short);
 extern const char * res_access_flags(unsigned short);
+#ifdef HAVE_KERNEL_PLL
 extern const char * k_st_flags (uint32_t);
+#endif
 extern char * statustoa (int, int);
 extern sockaddr_u * netof6 (sockaddr_u *);
 extern char * numtoa (uint32_t);
--- include/ntp_syscall.h.orig	2018-06-11 21:36:09.000000000 -0700
+++ include/ntp_syscall.h	2018-06-19 23:14:59.000000000 -0700
@@ -9,9 +9,11 @@
 #ifndef GUARD_NTP_SYSCALL_H
 #define GUARD_NTP_SYSCALL_H
 
+#ifdef HAVE_SYS_TIMEX_H
 # include <sys/time.h>	/* prerequisite on NetBSD */
 # include <sys/timex.h>
 
 extern int ntp_adjtime_ns(struct timex *);
+#endif
 
 /*
  * The units of the maxerror and esterror fields vary by platform.  If
--- libntp/clockwork.c.orig	2018-06-11 21:36:09.000000000 -0700
+++ libntp/clockwork.c	2018-06-19 23:14:59.000000000 -0700
@@ -5,8 +5,10 @@
 #include "config.h"
 
 #include <unistd.h>
-#include <sys/time.h>	/* prerequisite on NetBSD */
-#include <sys/timex.h>
+#ifdef HAVE_SYS_TIMEX_H
+# include <sys/time.h>	/* prerequisite on NetBSD */
+# include <sys/timex.h>
+#endif
 
 #include "ntp.h"
 #include "ntp_machine.h"
@@ -77,14 +79,10 @@
     int saved_errno;
 
     TPRINT(1, ("In ntp_set_tod\n"));
-#ifdef HAVE_CLOCK_SETTIME
     errno = 0;
     rc = clock_settime(CLOCK_REALTIME, tvs);
     saved_errno = errno;
     TPRINT(1, ("ntp_set_tod: clock_settime: %d %m\n", rc));
-#else
-#error POSIX clock_settime(2) is required
-#endif /* HAVE_CLOCK_SETTIME */
 
     errno = saved_errno;	/* for %m below */
     TPRINT(1, ("ntp_set_tod: Final result: clock_settime: %d %m\n", rc));
--- libntp/statestr.c.orig	2018-06-11 21:36:09.000000000 -0700
+++ libntp/statestr.c	2018-06-19 23:14:59.000000000 -0700
@@ -12,7 +12,9 @@
 #include "lib_strbuf.h"
 #include "ntp_refclock.h"
 #include "ntp_control.h"
+#ifdef HAVE_KERNEL_PLL
 # include "ntp_syscall.h"
+#endif
 
 
 /*
@@ -186,23 +188,50 @@
     /* not used with getcode(), no terminating entry needed */
 };
 
+#ifdef HAVE_KERNEL_PLL
 /*
  * kernel discipline status bits
  */
 static const struct codestring k_st_bits[] = {
+# ifdef STA_PLL
     { STA_PLL,		"pll" },
+# endif
+# ifdef STA_PPSFREQ
     { STA_PPSFREQ,	"ppsfreq" },
+# endif
+# ifdef STA_PPSTIME
     { STA_PPSTIME,	"ppstime" },
+# endif
+# ifdef STA_FLL
    { STA_FLL,		"fll" },
+# endif
+# ifdef STA_INS
    { STA_INS,		"ins" },
+# endif
+# ifdef STA_DEL
    { STA_DEL,		"del" },
+# endif
+# ifdef STA_UNSYNC
    { STA_UNSYNC,	"unsync" },
+# endif
+# ifdef STA_FREQHOLD
    { STA_FREQHOLD,	"freqhold" },
+# endif
+# ifdef STA_PPSSIGNAL
    { STA_PPSSIGNAL,	"ppssignal" },
+# endif
+# ifdef STA_PPSJITTER
    { STA_PPSJITTER,	"ppsjitter" },
+# endif
+# ifdef STA_PPSWANDER
    { STA_PPSWANDER,	"ppswander" },
+# endif
+# ifdef STA_PPSERROR
    { STA_PPSERROR,	"ppserror" },
+# endif
+# ifdef STA_CLOCKERR
    { STA_CLOCKERR,	"clockerr" },
+# endif
# ifdef STA_NANO
    { STA_NANO,		"nano" },
# endif
@@ -214,6 +243,7 @@
# endif
     /* not used with getcode(), no terminating entry needed */
 };
+#endif /* HAVE_KERNEL_PLL */
 
 /* Forwards */
 static const char * getcode(int, const struct codestring *);
@@ -315,9 +345,11 @@
          (tab == peer_st_bits)
              ? "peer_st"
              :
+#ifdef HAVE_KERNEL_PLL
          (tab == k_st_bits)
              ? "kern_st"
              :
+#endif
          "",
          (unsigned)bits, (int)LIB_BUFLENGTH);
 
     errno = saved_errno;
@@ -356,6 +388,7 @@
 }
 
 
+#ifdef HAVE_KERNEL_PLL
 const char *
 k_st_flags(
     uint32_t st
@@ -363,6 +396,8 @@
 {
     return decode_bitflags((int)st, " ", k_st_bits, COUNTOF(k_st_bits));
 }
+#endif /* HAVE_KERNEL_PLL */
+
 
 /*
  * statustoa - return a descriptive string for a peer status
--- ntpd/ntp_control.c.orig	2018-06-11 21:36:09.000000000 -0700
+++ ntpd/ntp_control.c	2018-06-19 23:14:59.000000000 -0700
@@ -1469,6 +1469,7 @@
     char str[256];
     double dtemp;
     const char *ss;
+#ifdef HAVE_KERNEL_PLL
     static struct timex ntx;
     static unsigned long ntp_adjtime_time;
 
@@ -1483,6 +1484,7 @@
         else
             ntp_adjtime_time = current_time;
     }
+#endif /* HAVE_KERNEL_PLL */
 
     switch (varid) {
 
@@ -1856,50 +1858,93 @@
         break;
 
         /*
-         * CTL_IF_KERNPPS() puts a zero if kernel hard PPS is not
+         * CTL_IF_KERNLOOP() puts a zero if the kernel loop is
+         * unavailable, otherwise calls putfunc with args.
+         */
+#ifndef HAVE_KERNEL_PLL
+# define CTL_IF_KERNLOOP(putfunc, args)	\
+        ctl_putint(sys_var[varid].text, 0)
+#else
+# define CTL_IF_KERNLOOP(putfunc, args)	\
+        putfunc args
+#endif
+
+        /*
+         * CTL_IF_KERNPPS() puts a zero if either the kernel
+         * loop is unavailable, or kernel hard PPS is not
          * active, otherwise calls putfunc with args.
          */
+#ifndef HAVE_KERNEL_PLL
+# define CTL_IF_KERNPPS(putfunc, args)	\
+        ctl_putint(sys_var[varid].text, 0)
+#else
 # define CTL_IF_KERNPPS(putfunc, args)		\
         if (0 == ntx.shift)			\
             ctl_putint(sys_var[varid].text, 0);	\
         else					\
             putfunc args	/* no trailing ; */
+#endif
 
     case CS_K_OFFSET:
-        ctl_putdblf(sys_var[varid].text, false, -1,
-            ntp_error_in_seconds(ntx.offset) * MS_PER_S);
+        CTL_IF_KERNLOOP(
+            ctl_putdblf,
+            (sys_var[varid].text, false, -1,
+             ntp_error_in_seconds(ntx.offset) * MS_PER_S)
+        );
         break;
 
     case CS_K_FREQ:
-        ctl_putsfp(sys_var[varid].text, ntx.freq);
+        CTL_IF_KERNLOOP(
+            ctl_putsfp,
+            (sys_var[varid].text, ntx.freq)
+        );
        break;
 
     case CS_K_MAXERR:
-        ctl_putdblf(sys_var[varid].text, false, 6,
-            ntp_error_in_seconds(ntx.maxerror) * MS_PER_S);
+        CTL_IF_KERNLOOP(
+            ctl_putdblf,
+            (sys_var[varid].text, false, 6,
+             ntp_error_in_seconds(ntx.maxerror) * MS_PER_S)
+        );
         break;
 
     case CS_K_ESTERR:
-        ctl_putdblf(sys_var[varid].text, false, 6,
-            ntp_error_in_seconds(ntx.esterror) * MS_PER_S);
+        CTL_IF_KERNLOOP(
+            ctl_putdblf,
+            (sys_var[varid].text, false, 6,
+             ntp_error_in_seconds(ntx.esterror) * MS_PER_S)
+        );
         break;
 
     case CS_K_STFLAGS:
+#ifndef HAVE_KERNEL_PLL
+        ss = "";
+#else
         ss = k_st_flags((uint32_t)ntx.status);
+#endif
         ctl_putstr(sys_var[varid].text, ss, strlen(ss));
         break;
 
     case CS_K_TIMECONST:
-        ctl_putint(sys_var[varid].text, ntx.constant);
+        CTL_IF_KERNLOOP(
+            ctl_putint,
+            (sys_var[varid].text, ntx.constant)
+        );
         break;
 
     case CS_K_PRECISION:
-        ctl_putdblf(sys_var[varid].text, false, 6,
-            ntp_error_in_seconds(ntx.precision) * MS_PER_S);
+        CTL_IF_KERNLOOP(
+            ctl_putdblf,
+            (sys_var[varid].text, false, 6,
+             ntp_error_in_seconds(ntx.precision) * MS_PER_S)
+        );
         break;
 
     case CS_K_FREQTOL:
-        ctl_putsfp(sys_var[varid].text, ntx.tolerance);
+        CTL_IF_KERNLOOP(
+            ctl_putsfp,
+            (sys_var[varid].text, ntx.tolerance)
+        );
         break;
 
     case CS_K_PPS_FREQ:
--- ntpd/ntp_loopfilter.c.orig	2018-06-11 21:36:09.000000000 -0700
+++ ntpd/ntp_loopfilter.c	2018-06-19 23:14:59.000000000 -0700
@@ -23,8 +23,10 @@
 
 #define NTP_MAXFREQ	500e-6
 
+#ifdef HAVE_KERNEL_PLL
 # define FREQTOD(x)	((x) / 65536e6) /* NTP to double */
 # define DTOFREQ(x)	((int32_t)((x) * 65536e6)) /* double to NTP */
+#endif
 
 /*
  * This is an implementation of the clock discipline algorithm described
@@ -122,13 +124,16 @@
 static double init_drift_comp; /* initial frequency (PPM) */
 double clock_stability;	/* frequency stability (wander) (s/s) */
 unsigned int sys_tai;	/* TAI offset from UTC */
-#ifndef ENABLE_LOCKCLOCK
+#if !defined(ENABLE_LOCKCLOCK) && defined(HAVE_KERNEL_PLL)
 static bool loop_started;	/* true after LOOP_DRIFTINIT */
+#endif /* !ENABLE_LOCKCLOCK && HAVE_KERNEL_PLL */
+#ifndef ENABLE_LOCKCLOCK
 static void rstclock (int, double); /* transition function */
 static double direct_freq(double); /* direct set frequency */
 static void set_freq(double);	/* set frequency */
 #endif /* ENABLE_LOCKCLOCK */
 
+#ifdef HAVE_KERNEL_PLL
 #ifndef PATH_MAX
 # define PATH_MAX MAX_PATH
 #endif
@@ -144,6 +149,7 @@
 #endif /* ENABLE_LOCKCLOCK */
 static void start_kern_loop(void);
 static void stop_kern_loop(void);
+#endif /* HAVE_KERNEL_PLL */
 
 /*
  * Clock state machine control flags
@@ -160,7 +166,9 @@
 int freq_cnt;		/* initial frequency clamp */
 static int freq_set;	/* initial set frequency switch */
 
+#ifdef HAVE_KERNEL_PLL
 static bool ext_enable;	/* external clock enabled */
+#endif /* HAVE_KERNEL_PLL */
 
 /*
  * Clock state machine variables
@@ -180,6 +188,7 @@
 static int sys_huffptr;		/* huff-n'-puff filter pointer */
 static double sys_mindly;	/* huff-n'-puff filter min delay */
 
+#if defined(HAVE_KERNEL_PLL)
 /* Emacs cc-mode goes nuts if we split the next line... */
 #define MOD_BITS (MOD_OFFSET | MOD_MAXERROR | MOD_ESTERROR | \
     MOD_STATUS | MOD_TIMECONST)
@@ -189,7 +198,9 @@
 static struct sigaction newsigsys; /* new sigaction status */
 static sigjmp_buf env;		/* environment var. for pll_trap() */
 #endif /* SIGSYS */
+#endif /* HAVE_KERNEL_PLL */
 
+#ifdef HAVE_KERNEL_PLL
 #ifndef ENABLE_LOCKCLOCK
 static void
 sync_status(const char *what, int ostatus, int nstatus)
@@ -215,6 +226,7 @@
     }
     return this_file;
 }
+#endif /* HAVE_KERNEL_PLL */
 
 /*
  * init_loopfilter - initialize loop filter data
@@ -230,6 +242,7 @@
     freq_cnt = (int)clock_minstep;
 }
 
+#ifdef HAVE_KERNEL_PLL
 /*
  * ntp_adjtime_error_handler - process errors from ntp_adjtime
 */
@@ -428,6 +441,7 @@
     }
     return;
 }
+#endif
 
 /*
 * local_clock - the NTP logical clock loop filter.
@@ -453,7 +467,9 @@
 #else
     int rval;		/* return code */
     int osys_poll;	/* old system poll */
+#ifdef HAVE_KERNEL_PLL
     int ntp_adj_ret;	/* returned by ntp_adjtime */
+#endif /* HAVE_KERNEL_PLL */
     double mu;		/* interval since last update */
     double clock_frequency; /* clock frequency */
     double dtemp, etemp;	/* double temps */
@@ -722,6 +738,7 @@
         }
     }
 
+#ifdef HAVE_KERNEL_PLL
     /*
      * This code segment works when clock adjustments are made using
      * precision time kernel support and the ntp_adjtime() system
@@ -837,6 +854,7 @@
         }
 #endif /* STA_NANO */
     }
+#endif /* HAVE_KERNEL_PLL */
 
     /*
     * Clamp the frequency within the tolerance range and calculate
@@ -950,8 +968,10 @@
     } else if (freq_cnt > 0) {
         offset_adj = clock_offset / (CLOCK_PLL * ULOGTOD(1));
         freq_cnt--;
+#ifdef HAVE_KERNEL_PLL
     } else if (clock_ctl.pll_control && clock_ctl.kern_enable) {
         offset_adj = 0.;
+#endif /* HAVE_KERNEL_PLL */
     } else {
         offset_adj = clock_offset / (CLOCK_PLL * ULOGTOD(sys_poll));
     }
@@ -962,9 +982,11 @@
      * set_freq().  Otherwise it is a component of the adj_systime()
     * offset.
     */
+#ifdef HAVE_KERNEL_PLL
     if (clock_ctl.pll_control && clock_ctl.kern_enable)
         freq_adj = 0.;
     else
+#endif /* HAVE_KERNEL_PLL */
         freq_adj = drift_comp;
 
     /* Bound absolute value of total adjustment to NTP_MAXFREQ. */
@@ -1056,6 +1078,7 @@
 
     drift_comp = freq;
     loop_desc = "ntpd";
+#ifdef HAVE_KERNEL_PLL
     if (clock_ctl.pll_control) {
         int ntp_adj_ret;
         ZERO(ntv);
@@ -1068,11 +1091,13 @@
             ntp_adjtime_error_handler(__func__, &ntv, ntp_adj_ret, errno, false, false, __LINE__ - 1);
         }
     }
+#endif /* HAVE_KERNEL_PLL */
     mprintf_event(EVNT_FSET, NULL, "%s %.6f PPM", loop_desc,
         drift_comp * US_PER_S);
 }
 #endif /* HAVE_LOCKCLOCK */
 
+#ifdef HAVE_KERNEL_PLL
 static void
 start_kern_loop(void)
 {
@@ -1133,8 +1158,10 @@
              "kernel time sync enabled");
     }
 }
+#endif /* HAVE_KERNEL_PLL */
 
 
+#ifdef HAVE_KERNEL_PLL
 static void
 stop_kern_loop(void)
 {
@@ -1142,6 +1169,7 @@
         report_event(EVNT_KERN, NULL,
             "kernel time sync disabled");
 }
+#endif /* HAVE_KERNEL_PLL */
 
 
 /*
@@ -1154,17 +1182,21 @@
 {
     if (clock_ctl.kern_enable == use_kern_loop)
         return;
+#ifdef HAVE_KERNEL_PLL
     if (clock_ctl.pll_control && !use_kern_loop)
         stop_kern_loop();
+#endif
     clock_ctl.kern_enable = use_kern_loop;
+#ifdef HAVE_KERNEL_PLL
     if (clock_ctl.pll_control && use_kern_loop)
         start_kern_loop();
+#endif
     /*
     * If this loop selection change occurs after initial startup,
     * call set_freq() to switch the frequency compensation to or
    * from the kernel loop.
     */
-#if !defined(ENABLE_LOCKCLOCK)
+#if defined(HAVE_KERNEL_PLL) && !defined(ENABLE_LOCKCLOCK)
     if (clock_ctl.pll_control && loop_started)
         set_freq(drift_comp);
 #endif
@@ -1215,10 +1247,12 @@
     */
    case LOOP_DRIFTINIT:
 #ifndef ENABLE_LOCKCLOCK
+#ifdef HAVE_KERNEL_PLL
        if (clock_ctl.mode_ntpdate)
            break;
 
        start_kern_loop();
+#endif /* HAVE_KERNEL_PLL */
 
        /*
        * Initialize frequency if given; otherwise, begin frequency
@@ -1236,13 +1270,16 @@
            rstclock(EVNT_FSET, 0);
        else
            rstclock(EVNT_NSET, 0);
+#ifdef HAVE_KERNEL_PLL
        loop_started = true;
+#endif /* HAVE_KERNEL_PLL */
 #endif /* !ENABLE_LOCKCLOCK */
        break;
 
    case LOOP_KERN_CLEAR:
 #if 0	/* XXX: needs more review, and how can we get here? */
 #ifndef ENABLE_LOCKCLOCK
+# ifdef HAVE_KERNEL_PLL
        if (clock_ctl.pll_control && clock_ctl.kern_enable) {
            memset((char *)&ntv, 0, sizeof(ntv));
            ntv.modes = MOD_STATUS;
@@ -1252,6 +1289,7 @@
                pll_status,
                ntv.status);
        }
+# endif /* HAVE_KERNEL_PLL */
 #endif /* ENABLE_LOCKCLOCK */
 #endif
        break;
@@ -1331,7 +1369,7 @@
 }
 
 
-#if defined(SIGSYS)
+#if defined(HAVE_KERNEL_PLL) && defined(SIGSYS)
 /*
 * _trap - trap processor for undefined syscalls
 *
@@ -1349,4 +1387,4 @@
     clock_ctl.pll_control = false;
     siglongjmp(env, 1);
 }
-#endif /* SIGSYS */
+#endif /* HAVE_KERNEL_PLL && SIGSYS */
--- ntpd/ntp_timer.c.orig	2018-06-11 21:36:09.000000000 -0700
+++ ntpd/ntp_timer.c	2018-06-19 23:14:59.000000000 -0700
@@ -13,7 +13,9 @@
 #include <signal.h>
 #include <unistd.h>
 
+#ifdef HAVE_KERNEL_PLL
 #include "ntp_syscall.h"
+#endif /* HAVE_KERNEL_PLL */
 
 #ifdef HAVE_TIMER_CREATE
 /* TC_ERR represents the timer_create() error return value. */
@@ -373,7 +375,11 @@
     leap_result_t lsdata;
     uint32_t lsprox;
 
+#ifdef HAVE_KERNEL_PLL
     leapsec_electric((clock_ctl.pll_control && clock_ctl.kern_enable) ?
        electric_on : electric_off);
+#else
+    leapsec_electric(electric_off);
+#endif
 #ifdef ENABLE_LEAP_SMEAR
     leap_smear.enabled = (leap_smear_intv != 0);
 #endif
--- ntpd/refclock_local.c.orig	2018-06-11 21:36:09.000000000 -0700
+++ ntpd/refclock_local.c	2018-06-19 23:14:59.000000000 -0700
@@ -131,6 +131,9 @@
     struct peer *peer
     )
 {
+#if defined(HAVE_KERNEL_PLL) && defined(ENABLE_LOCKCLOCK)
+    struct timex ntv;
+#endif /* HAVE_KERNEL_PLL ENABLE_LOCKCLOCK */
     struct refclockproc *pp;
 
     UNUSED_ARG(unit);
@@ -156,8 +159,7 @@
     * If another process is disciplining the system clock, we set
     * the leap bits and quality indicators from the kernel.
     */
-#if defined(ENABLE_LOCKCLOCK)
-    struct timex ntv;
+#if defined(HAVE_KERNEL_PLL) && defined(ENABLE_LOCKCLOCK)
     memset(&ntv, 0, sizeof ntv);
     switch (ntp_adjtime(&ntv)) {
     case TIME_OK:
@@ -181,11 +183,11 @@
     }
     pp->disp = 0;
     pp->jitter = 0;
-#else /* ENABLE_LOCKCLOCK */
+#else /* HAVE_KERNEL_PLL && ENABLE_LOCKCLOCK */
     pp->leap = LEAP_NOWARNING;
     pp->disp = DISPERSION;
     pp->jitter = 0;
-#endif /* ENABLE_LOCKCLOCK */
+#endif /* HAVE_KERNEL_PLL && ENABLE_LOCKCLOCK */
     pp->lastref = pp->lastrec;
     refclock_receive(peer);
 }
--- ntpfrob/precision.c.orig	2018-06-11 21:36:09.000000000 -0700
+++ ntpfrob/precision.c	2018-06-19 23:14:59.000000000 -0700
@@ -11,6 +11,7 @@
 #include "ntp_types.h"
 #include "ntp_calendar.h"
 #include "ntpfrob.h"
+#include "ntp_machine.h"
 
 #define DEFAULT_SYS_PRECISION	-99
 
--- wafhelpers/options.py.orig	2018-06-11 21:36:09.000000000 -0700
+++ wafhelpers/options.py	2018-06-19 23:14:59.000000000 -0700
@@ -21,6 +21,8 @@
                    default=False, help="Enable seccomp (restricts syscalls).")
     grp.add_option('--disable-dns-lookup', action='store_true',
                    default=False, help="Disable DNS lookups.")
+    grp.add_option('--disable-kernel-pll', action='store_true',
+                   default=False, help="Disable kernel PLL.")
     grp.add_option('--disable-mdns-registration', action='store_true',
                    default=False, help="Disable MDNS registration.")
     grp.add_option(
--- wscript.orig	2018-06-11 21:36:09.000000000 -0700
+++ wscript	2018-06-19 23:14:59.000000000 -0700
@@ -562,13 +562,13 @@
         ctx.define("__EXTENSIONS__", "1", quote=False)
 
     structures = (
-        ("struct if_laddrconf", ["sys/types.h", "net/if6.h"], False),
-        ("struct if_laddrreq", ["sys/types.h", "net/if6.h"], False),
-        ("struct timex", ["sys/time.h", "sys/timex.h"], True),
-        ("struct ntptimeval", ["sys/time.h", "sys/timex.h"], False),
+        ("struct if_laddrconf", ["sys/types.h", "net/if6.h"]),
+        ("struct if_laddrreq", ["sys/types.h", "net/if6.h"]),
+        ("struct timex", ["sys/time.h", "sys/timex.h"]),
+        ("struct ntptimeval", ["sys/time.h", "sys/timex.h"]),
     )
-    for (s, h, r) in structures:
-        ctx.check_cc(type_name=s, header_name=h, mandatory=r)
+    for (s, h) in structures:
+        ctx.check_cc(type_name=s, header_name=h, mandatory=False)
 
     # waf's SNIP_FIELD should likely include this header itself
     # This is needed on some systems to get size_t for following checks
@@ -625,8 +625,6 @@
         ('adjtimex', ["sys/time.h", "sys/timex.h"]),
         ('backtrace_symbols_fd', ["execinfo.h"]),
         ('closefrom', ["stdlib.h"]),
-        ('clock_gettime', ["time.h"], "RT"),
-        ('clock_settime', ["time.h"], "RT"),
        ('ntp_adjtime', ["sys/time.h", "sys/timex.h"]),   # BSD
        ('ntp_gettime', ["sys/time.h", "sys/timex.h"]),   # BSD
        ('res_init', ["netinet/in.h", "arpa/nameser.h", "resolv.h"]),
@@ -771,6 +769,21 @@
     ctx.define("HAVE_WORKING_FORK", 1,
                comment="Whether a working fork() exists")
 
+    # Does the kernel implement a phase-locked loop for timing?
+    # All modern Unixes (in particular Linux and *BSD) have this.
+    #
+    # The README for the (now deleted) kernel directory says this:
+    # "If the precision-time kernel (KERNEL_PLL define) is
+    # configured, the installation process requires the header
+    # file /usr/include/sys/timex.h for the particular
+    # architecture to be in place."
+    #
+    if ((ctx.get_define("HAVE_SYS_TIMEX_H") and
+            not ctx.options.disable_kernel_pll)):
+        ctx.define("HAVE_KERNEL_PLL", 1,
+                   comment="Whether phase-locked loop for timing "
+                   "exists and is enabled")
+
     # SO_REUSEADDR socket option is needed to open a socket on an
     # interface when the port number is already in use on another
     # interface.  Linux needs this, NetBSD does not, status on